Added normals to prediction DataObject

parent efba70f19a
commit 74de208831
@@ -135,6 +135,7 @@ class CustomShapeNet(InMemoryDataset):
             else:
                 # Get the y label
                 if self.mode != 'predict':
                     # TODO: This is a shady function; elaborate on it
                     y_raw = next(i for i, v in enumerate(self.categories.keys()) if v.lower() in element.lower())
                     y_all = [y_raw] * points.shape[0]
                 else:
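The TODO above concerns the `next(...)` lookup, which derives the class index from the first category name that occurs in the element's path. A minimal standalone sketch of that behavior, with hypothetical category names and file path:

```python
# Hypothetical stand-ins for self.categories and element.
categories = {'Airplane': 0, 'Chair': 1, 'Lamp': 2}
element = './data/raw/train/chair_0042.xyz'

# Index of the first category whose name appears in the path;
# raises StopIteration when no category matches.
y_raw = next(i for i, v in enumerate(categories.keys()) if v.lower() in element.lower())
print(y_raw)  # -> 1
```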
@@ -187,15 +188,14 @@ class ShapeNetPartSegDataset(Dataset):

     def __getitem__(self, index):
         data = self.dataset[index]
-        points, labels = data.pos, data.y  # , data.points, data.norm

         # Resample to a fixed number of points
         try:
-            choice = np.random.choice(points.shape[0], self.npoints, replace=True)
+            choice = np.random.choice(data.pos.shape[0], self.npoints, replace=True)
         except ValueError:
             choice = []

-        points, labels = points[choice, :], labels[choice]
+        points, labels = data.pos[choice, :], data.y[choice]

         labels -= 1 if self.num_classes() in labels else 0  # Map label from [1, C] to [0, C-1]

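The try/except guards against an empty cloud: `np.random.choice` raises ValueError when there is nothing to sample from, and the empty-list fallback simply yields empty tensors downstream instead of crashing. A quick demonstration:

```python
import numpy as np

try:
    # raises ValueError: nothing to sample from
    choice = np.random.choice(0, 1024, replace=True)
except ValueError:
    choice = []  # indexing with an empty list produces empty arrays/tensors
```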
@@ -204,7 +204,8 @@ class ShapeNetPartSegDataset(Dataset):
             'labels': labels  # torch.Tensor (n,)
         }
         if self.mode == 'predict':
-            sample.update(normals=data.normals)
+            normals = data.normals[choice]
+            sample.update(normals=normals)

         return sample

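The substance of the fix in these two hunks: the same `choice` index vector is now applied to `data.normals` as well, so the resampled points, labels, and normals of a prediction sample stay aligned. A minimal sketch with random stand-ins for the data object's fields:

```python
import numpy as np

n_raw, npoints = 2049, 1024
pos = np.random.rand(n_raw, 3)        # stand-in for data.pos
y = np.random.randint(0, 4, n_raw)    # stand-in for data.y
normals = np.random.rand(n_raw, 3)    # stand-in for data.normals

# replace=True also upsamples clouds smaller than npoints to a fixed size
choice = np.random.choice(pos.shape[0], npoints, replace=True)

# one shared index keeps all per-point attributes aligned
points, labels, norms = pos[choice, :], y[choice], normals[choice]
assert points.shape[0] == labels.shape[0] == norms.shape[0] == npoints
```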
@@ -32,17 +32,15 @@ def clusterToColor(cluster, cluster_idx):
     return colors


-def read_pointcloud(path):
-    file = open(path)
-
-    header = file.readline()
-    num_points = int(header.split()[0])
-    pc = []
-
-    for i in range(num_points):
-        pc.append(list(float(s) for s in file.readline().split()))
-
-    return np.array(pc)
+def read_pointcloud(path, delimiter=' ', hasHeader=True):
+    with open(path, 'r') as f:
+        if hasHeader:
+            # Get rid of the header
+            _ = f.readline()
+        # This iterates over all lines, splits them and converts the values to floats. It will fail on malformed values.
+        pc = [[float(x) for x in line.rstrip().split(delimiter)] for line in f if line != '']
+
+    return np.asarray(pc)


 def write_pointcloud(file, pc, numCols=6):
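The rewrite makes the reader header-tolerant and closes the file via a context manager instead of leaving it open. Usage sketch, plus a rough numpy equivalent (file name assumed from the prediction script below):

```python
import numpy as np

pc_arr = read_pointcloud('./pointclouds/0_pc.xyz', delimiter=' ', hasHeader=True)
print(pc_arr.shape)  # e.g. (2049, 6): x, y, z plus a 3-component normal

# Roughly equivalent for whitespace-separated files with a one-line header:
pc_arr2 = np.loadtxt('./pointclouds/0_pc.xyz', skiprows=1)
```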
@@ -1,3 +1,4 @@
+2049 6
 0.0759953 -0.395683 -0.130632 -0.871549 -0.481571 0.0921502
 -0.364005 0.664317 0.169368 -0.997143 0.00181983 -0.0755164
 0.415995 0.424317 -0.150632 0.920812 -0.223131 -0.319865
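The new first line appears to be a header in the `num_points num_columns` form the old reader consumed: 2049 points with 6 columns each (x, y, z plus a 3-component normal). The rewritten read_pointcloud no longer interprets it; with hasHeader=True it is simply skipped.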
@@ -260,9 +260,9 @@ if __name__ == '__main__':
     dataset_folder = './data/raw/predict/'
     pointcloud_file = './pointclouds/0_pc.xyz'

-    pc = pc.read_pointcloud(pointcloud_file)
-    pc = normalize_pointcloud(pc)
-    pc = append_normal_angles(pc)
+    pcloud = pc.read_pointcloud(pointcloud_file)
+    pcloud = normalize_pointcloud(pcloud)
+    pcloud = append_normal_angles(pcloud)

     # pc = StandardScaler().fit_transform(pc)

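The rename from `pc` to `pcloud` fixes a name collision: the array returned by `pc.read_pointcloud` was assigned back to `pc`, shadowing the module it came from, so the later `pc.write_pointcloud(...)` call would have hit an ndarray instead of the module. A sketch of the failure mode (the import name `pc` is taken from the calls above; the module path is assumed):

```python
import pointcloud as pc  # hypothetical module path

pc = pc.read_pointcloud('./pointclouds/0_pc.xyz')  # rebinds the module name
pc.write_pointcloud('out.xyz', pc)  # AttributeError: ndarray has no attribute 'write_pointcloud'
```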
@@ -270,10 +270,10 @@ if __name__ == '__main__':

     # Add full point cloud to prediction folder.
     recreate_folder(dataset_folder + '0_0' + '/')
-    pc_fps = farthest_point_sampling(pc, opt.npoints)
+    pc_fps = farthest_point_sampling(pcloud, opt.npoints)
     pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)

-    pc_clusters = extract_cube_clusters(pc, [4,4,4], 1024)
+    pc_clusters = extract_cube_clusters(pcloud, [4,4,4], 1024)
     # pc_clusters = extract_clusters(pc, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
     # Add cluster point clouds to prediction folder.
     for idx, pcc in enumerate(pc_clusters):
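`farthest_point_sampling` downsamples the full cloud to `opt.npoints` while preserving its spatial coverage before it is written to the prediction folder. The project's implementation is not part of this diff; a minimal greedy sketch over the xyz columns, for reference:

```python
import numpy as np

def farthest_point_sampling_sketch(points, k):
    """Greedy FPS: repeatedly add the point farthest from those
    already selected. O(n*k); fine for a few thousand points."""
    xyz = points[:, :3]                     # sample by position only
    n = xyz.shape[0]
    selected = np.zeros(k, dtype=np.int64)
    selected[0] = np.random.randint(n)      # arbitrary seed point
    dist = np.full(n, np.inf)               # distance to the selected set
    for i in range(1, k):
        d = np.linalg.norm(xyz - xyz[selected[i - 1]], axis=1)
        dist = np.minimum(dist, d)
        selected[i] = int(np.argmax(dist))  # farthest remaining point
    return points[selected]
```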