Merge remote-tracking branch 'origin/master'

# Conflicts:
#	predict/predict.py
Si11ium 2019-08-06 14:37:01 +02:00
commit 877375e7b3
6 changed files with 2628 additions and 30 deletions

dataset/primgen.py Normal file

@@ -0,0 +1,136 @@
import subprocess as sp
import glob
import os
import shutil

import pointcloud as pc
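# Configuration below is forwarded verbatim to the primgen command line (see run()).
# toolPath, and the model/output folders in __main__, are machine-specific absolute
# paths; adjust them before running.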
modelType = "off"
samplingRate = 0.003
maxDistance = 0.003
maxAngleDistance = 0.01
errorSigma = 0
k = 16
pointCloudSize = 2048
cutOutProb = 0
maxIterations = 10
toolPath = "C:/Projekte/csg_playground_build/Release/primgen.exe"
def getExtension(modType):
    if "off" in modType:
        return "off"
    elif "obj" in modType:
        return "obj"
    elif "csg" in modType:
        return "json"  # no leading dot: runForFolder() globs for "*." + extension
    else:
        return ""

def run(modelPath, outputFolder):
    executable = "{} {} \"{}\" \"{}\" {} {} {} {} {} {} {} {}".format(
        toolPath, modelType, modelPath, outputFolder,
        samplingRate, maxDistance, maxAngleDistance, errorSigma,
        k, pointCloudSize, cutOutProb, maxIterations)

    print("Call generator with " + executable)

    p = sp.Popen(executable, stdout=sp.PIPE)
    for line in p.stdout:
        print(line)

    # Reading stdout to EOF means the tool is done; wait() collects its return code.
    return_code = p.wait()
    if return_code == -1:
        print("Unable to generate primitives for model {}".format(modelPath))
    else:
        print("Done. Exit code: " + str(return_code))

def extract_clusters(outputFolder, cluster_method="none", **kwargs):
    if "none" in cluster_method:
        return

    all_clusters = []

    for subdir, dirs, _ in os.walk(outputFolder):
        for dir in dirs:
            path = os.path.join(subdir, dir)
            for _, _, files in os.walk(path):
                for file in files:
                    file = os.path.join(path, file.lower())
                    print(file)
                    if file.endswith("_pc.xyz"):
                        pointcloud = pc.read_pointcloud(file)

                        clusters = []
                        if "per_primitive" in cluster_method:
                            clusters = pc.cluster_per_column(pointcloud, column=7)  # primitive id column
                        elif "cube" in cluster_method:
                            clusters = pc.cluster_cubes(pointcloud,
                                                        kwargs.get("cluster_dims", [1, 1, 1]))
                        elif "dbscan" in cluster_method:
                            clusters = pc.cluster_dbscan(pointcloud,
                                                         selected_indices=kwargs.get("selected_indices", [0, 1, 2, 3, 4, 5]),
                                                         eps=kwargs.get("eps", 0.1),
                                                         min_samples=kwargs.get("min_samples", None))

                        for idx, cluster in enumerate(clusters):
                            pos = file.rfind("pc.xyz")
                            new_file = file[:pos] + str(idx) + "_pc.xyz"
                            pc.write_pointcloud(new_file, cluster)
                        # all_clusters.extend(clusters)

    return all_clusters

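# Example call (hypothetical folder), re-clustering every *_pc.xyz file with DBSCAN:
# extract_clusters("D:/output_0/", "dbscan", selected_indices=[0, 1, 2], eps=0.1, min_samples=0.01)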
def runForFolder(modelFolder, outputFolder):
    os.chdir(modelFolder)
    modelFiles = glob.glob("*." + getExtension(modelType))
    print("Working on {} model files with extension {} from folder {}.".format(
        len(modelFiles), getExtension(modelType), modelFolder))

    folderIdx = 0
    for modelFile in modelFiles:
        try:
            subfolderPath = outputFolder + str(folderIdx) + "/"
            folderIdx += 1

            print("Check if output sub folder exists...")
            if os.path.exists(subfolderPath) and os.path.isdir(subfolderPath):
                shutil.rmtree(subfolderPath)
                print("Yes => Existing sub folder was deleted.")

            os.mkdir(subfolderPath)
            print("Successfully created the directory %s " % subfolderPath)

            run(modelFolder + modelFile, subfolderPath)
        except OSError as err:
            print("Creation of the directory %s failed." % str(err))

if __name__ == "__main__":
outputFolder = "D:/output_0/"
modelFolder = "C:/Users/friedrich/PycharmProjects/data/models/"
# clusters = extract_clusters("C:/Projekte/csg_playground_build/testOFF", "cube",
# cluster_dims=[2,2,2])
# #eps=0.1, min_samples=0.01, selected_indices=[7])
# pc.draw_clusters(clusters)
runForFolder(modelFolder, outputFolder)

Binary file not shown.

pointcloud.py Normal file

@@ -0,0 +1,157 @@
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
def mini_color_table(index, norm=True):
    colors = [
        [0.5000, 0.5400, 0.5300], [0.8900, 0.1500, 0.2100], [0.6400, 0.5800, 0.5000],
        [1.0000, 0.3800, 0.0100], [1.0000, 0.6600, 0.1400], [0.4980, 1.0000, 0.0000],
        [0.4980, 1.0000, 0.8314], [0.9412, 0.9725, 1.0000], [0.5412, 0.1686, 0.8863],
        [0.5765, 0.4392, 0.8588], [0.3600, 0.1400, 0.4300], [0.5600, 0.3700, 0.6000],
    ]

    color = colors[index % len(colors)]
    if not norm:
        color[0] *= 255
        color[1] *= 255
        color[2] *= 255

    return color

def clusterToColor(cluster, cluster_idx):
    colors = np.zeros(shape=(len(cluster), 3))

    point_idx = 0
    for point in cluster:
        colors[point_idx, :] = mini_color_table(cluster_idx)
        point_idx += 1

    return colors

def read_pointcloud(path):
    # Read the "<num_points> <num_cols>" header, then one point per line.
    with open(path) as file:
        header = file.readline()
        num_points = int(header.split()[0])

        pc = []
        for i in range(num_points):
            pc.append(list(float(s) for s in file.readline().split()))

    return np.array(pc)

def write_pointcloud(file, pc, numCols=6):
    np.savetxt(file, pc[:, :numCols], header=str(len(pc)) + ' ' + str(numCols), comments='')

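# Greedy farthest-point sampling: starting from a random seed point, each iteration
# adds the point with the largest distance to the points picked so far, which yields
# an approximately uniform K-point subsample (the input is returned unchanged if it
# has fewer than K points).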
def farthest_point_sampling(pts, K):
    if pts.shape[0] < K:
        return pts

    def calc_distances(p0, points):
        return ((p0[:3] - points[:, :3]) ** 2).sum(axis=1)

    farthest_pts = np.zeros((K, pts.shape[1]))
    farthest_pts[0] = pts[np.random.randint(len(pts))]
    distances = calc_distances(farthest_pts[0], pts)
    for i in range(1, K):
        farthest_pts[i] = pts[np.argmax(distances)]
        distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))

    return farthest_pts

def cluster_per_column(pc, column):
    clusters = []
    # +1 so the highest id occurring in the column gets its own cluster as well.
    for i in range(int(np.max(pc[:, column])) + 1):
        cluster_pc = pc[pc[:, column] == i, :]
        clusters.append(cluster_pc)

    return clusters

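# cluster_cubes bins points into a regular cluster_dims[0] x cluster_dims[1] x cluster_dims[2]
# grid over the point bounding box and linearizes the integer cell (x, y, z) as
#   idx = dims_x * dims_z * y + dims_x * z + x
# e.g. for cluster_dims = [4, 4, 4], the cell (1, 2, 3) maps to 4*4*2 + 4*3 + 1 = 45.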
def cluster_cubes(data, cluster_dims):
    max = data[:, :3].max(axis=0)
    max += max * 0.01
    min = data[:, :3].min(axis=0)
    min -= min * 0.01

    size = (max - min)

    clusters = {}

    cluster_size = size / np.array(cluster_dims, dtype=np.float32)

    print('Min: ' + str(min) + ' Max: ' + str(max))
    print('Cluster Size: ' + str(cluster_size))

    for row in data:
        # print('Row: ' + str(row))
        cluster_pos = ((row[:3] - min) / cluster_size).astype(int)
        cluster_idx = cluster_dims[0] * cluster_dims[2] * cluster_pos[1] + cluster_dims[0] * cluster_pos[2] + cluster_pos[0]
        clusters.setdefault(cluster_idx, []).append(row)

    # Apply farthest point sampling to each cluster
    for key, cluster in clusters.items():
        c = np.vstack(cluster)
        clusters[key] = c  # farthest_point_sampling(c, max_points_per_cluster)

    return clusters.values()

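# cluster_dbscan: min_samples is given as a fraction of the point count and converted
# to an absolute count below; the columns listed in selected_indices (see the layout
# comment inside) form the DBSCAN feature space, and noise points (label -1) are
# discarded.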
def cluster_dbscan(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):
    min_samples = min_samples * len(data)

    print('Clustering. Min Samples: ' + str(min_samples) + ' EPS: ' + str(eps) + ' Selected Indices: ' + str(selected_indices))

    # 0,1,2 : pos
    # 3,4,5 : normal
    # 6: type index
    # 7,8,9,10: type index one hot encoded
    # 11,12: normal as angles
    db_res = DBSCAN(eps=eps, metric=metric, n_jobs=-1, algorithm=algo, min_samples=min_samples).fit(data[:, selected_indices])

    labels = db_res.labels_
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise = list(labels).count(-1)
    print("Noise: " + str(n_noise) + " Clusters: " + str(n_clusters))

    clusters = {}
    for idx, l in enumerate(labels):
        if l == -1:
            continue
        clusters.setdefault(str(l), []).append(data[idx, :])

    npClusters = []
    for cluster in clusters.values():
        npClusters.append(np.array(cluster))

    return npClusters

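# Note: draw_clusters below appears to target the pre-0.8 open3d API (o3d.PointCloud,
# o3d.Vector3dVector, o3d.draw_geometries); newer releases moved these classes into
# o3d.geometry / o3d.utility / o3d.visualization.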
def draw_clusters(clusters):
    clouds = []

    for cluster_idx, cluster in enumerate(clusters):
        cloud = o3d.PointCloud()
        cloud.points = o3d.Vector3dVector(cluster[:, :3])
        cloud.colors = o3d.Vector3dVector(clusterToColor(cluster, cluster_idx))
        clouds.append(cloud)

    o3d.draw_geometries(clouds)

predict/pointclouds/0_pc.xyz Normal file

File diff suppressed because it is too large

predict/predict.py

@@ -1,17 +1,244 @@
import sys
import os
import shutil
import math

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')  # add project root directory

from dataset.shapenet import ShapeNetPartSegDataset
from model.pointnet2_part_seg import PointNet2PartSegmentNet

import torch_geometric.transforms as GT
import torch
import argparse
from distutils.util import strtobool
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import open3d as o3d

import pointcloud as pc

def eval_sample(net, sample):
    '''
    sample: { 'points': tensor(n, 3), 'labels': tensor(n,) }
    return: (pred_label, gt_label) with labels shape (n,)
    '''
    net.eval()
    with torch.no_grad():
        # points: (n, 3)
        points, gt_label = sample['points'], sample['labels']
        n = points.shape[0]

        points = points.view(1, n, 3)  # make a batch
        points = points.transpose(1, 2).contiguous()
        points = points.to(device, dtype)

        pred = net(points)  # (batch_size, n, num_classes)
        pred_label = pred.max(2)[1]
        pred_label = pred_label.view(-1).cpu()  # (n,)

        assert pred_label.shape == gt_label.shape
        return (pred_label, gt_label)

def mini_color_table(index, norm=True):
    colors = [
        [0.5000, 0.5400, 0.5300], [0.8900, 0.1500, 0.2100], [0.6400, 0.5800, 0.5000],
        [1.0000, 0.3800, 0.0100], [1.0000, 0.6600, 0.1400], [0.4980, 1.0000, 0.0000],
        [0.4980, 1.0000, 0.8314], [0.9412, 0.9725, 1.0000], [0.5412, 0.1686, 0.8863],
        [0.5765, 0.4392, 0.8588], [0.3600, 0.1400, 0.4300], [0.5600, 0.3700, 0.6000],
    ]

    color = colors[index % len(colors)]
    if not norm:
        color[0] *= 255
        color[1] *= 255
        color[2] *= 255

    return color

def label2color(labels):
    '''
    labels: np.ndarray with shape (n, )
    colors(return): np.ndarray with shape (n, 3)
    '''
    num = labels.shape[0]
    colors = np.zeros((num, 3))

    minl, maxl = np.min(labels), np.max(labels)
    for l in range(minl, maxl + 1):
        colors[labels == l, :] = mini_color_table(l)

    return colors

def clusterToColor(cluster, cluster_idx):
    colors = np.zeros(shape=(len(cluster), 3))

    point_idx = 0
    for point in cluster:
        colors[point_idx, :] = mini_color_table(cluster_idx)
        point_idx += 1

    return colors

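# normalize_pointcloud scales the xyz columns by the largest bounding-box extent and
# rescales the normal columns (3:6) to unit length; it modifies the array in place
# and also returns it.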
def normalize_pointcloud(pc):
    max = pc.max(axis=0)
    min = pc.min(axis=0)

    f = np.max([abs(max[0] - min[0]), abs(max[1] - min[1]), abs(max[2] - min[2])])

    pc[:, 0:3] /= f
    pc[:, 3:6] /= (np.linalg.norm(pc[:, 3:6], ord=2, axis=1, keepdims=True))

    return pc

def farthest_point_sampling(pts, K):
    if pts.shape[0] < K:
        return pts

    def calc_distances(p0, points):
        return ((p0[:3] - points[:, :3]) ** 2).sum(axis=1)

    farthest_pts = np.zeros((K, pts.shape[1]))
    farthest_pts[0] = pts[np.random.randint(len(pts))]
    distances = calc_distances(farthest_pts[0], pts)
    for i in range(1, K):
        farthest_pts[i] = pts[np.argmax(distances)]
        distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))

    return farthest_pts

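# append_onehotencoded_type appends four columns that one-hot encode the integer type
# id stored in column 6, scaled by `factor`; e.g. type 2 with factor 1.0 contributes
# the row [0, 0, 1, 0].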
def append_onehotencoded_type(data, factor=1.0):
    types = data[:, 6].astype(int)
    res = np.zeros((len(types), 4))
    res[np.arange(len(types)), types] = factor

    return np.column_stack((data, res))

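# append_normal_angles converts each (assumed unit-length) normal in columns 3:6 to
# spherical angles theta = acos(z) / pi and phi = (atan2(y, x) + pi) / (2*pi), both
# mapped into [0, 1], and appends them as two extra columns.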
def append_normal_angles(data):
    def func(x):
        theta = math.acos(x[2]) / math.pi
        phi = (math.atan2(x[1], x[0]) + math.pi) / (2.0 * math.pi)
        return (theta, phi)

    res = np.array([func(xi) for xi in data[:, 3:6]])
    print(res)

    return np.column_stack((data, res))

def extract_cube_clusters(data, cluster_dims, max_points_per_cluster):
    max = data[:, :3].max(axis=0)
    max += max * 0.01
    min = data[:, :3].min(axis=0)
    min -= min * 0.01

    size = (max - min)

    clusters = {}

    cluster_size = size / np.array(cluster_dims, dtype=np.float32)

    print('Min: ' + str(min) + ' Max: ' + str(max))
    print('Cluster Size: ' + str(cluster_size))

    for row in data:
        # print('Row: ' + str(row))
        cluster_pos = ((row[:3] - min) / cluster_size).astype(int)
        cluster_idx = cluster_dims[0] * cluster_dims[2] * cluster_pos[1] + cluster_dims[0] * cluster_pos[2] + cluster_pos[0]
        clusters.setdefault(cluster_idx, []).append(row)

    # Apply farthest point sampling to each cluster
    for key, cluster in clusters.items():
        c = np.vstack(cluster)
        clusters[key] = farthest_point_sampling(c, max_points_per_cluster)

    return clusters.values()

def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):
    min_samples = min_samples * len(data)

    print('Clustering. Min Samples: ' + str(min_samples) + ' EPS: ' + str(eps))

    # 0,1,2 : pos
    # 3,4,5 : normal
    # 6: type index
    # 7,8,9,10: type index one hot encoded
    # 11,12: normal as angles
    db_res = DBSCAN(eps=eps, metric=metric, n_jobs=-1, algorithm=algo, min_samples=min_samples).fit(data[:, selected_indices])

    labels = db_res.labels_
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise = list(labels).count(-1)
    print("Noise: " + str(n_noise) + " Clusters: " + str(n_clusters))

    clusters = {}
    for idx, l in enumerate(labels):
        if l == -1:
            continue
        clusters.setdefault(str(l), []).append(data[idx, :])

    npClusters = []
    for cluster in clusters.values():
        npClusters.append(np.array(cluster))

    return npClusters

def draw_clusters(clusters):
    clouds = []

    cluster_idx = 0
    for cluster in clusters:
        cloud = o3d.PointCloud()
        cloud.points = o3d.Vector3dVector(cluster[:, :3])
        cloud.colors = o3d.Vector3dVector(clusterToColor(cluster, cluster_idx))
        clouds.append(cloud)
        cluster_idx += 1

    o3d.draw_geometries(clouds)

def draw_sample_data(sample_data, colored_normals=False):
    cloud = o3d.PointCloud()
    cloud.points = o3d.Vector3dVector(sample_data[:, :3])
    cloud.colors = \
        o3d.Vector3dVector(label2color(sample_data[:, 6].astype(int)) if not colored_normals else sample_data[:, 3:6])

    o3d.draw_geometries([cloud])

def recreate_folder(folder):
    if os.path.exists(folder) and os.path.isdir(folder):
        shutil.rmtree(folder)
    os.mkdir(folder)

##
parser = argparse.ArgumentParser()
parser.add_argument('--npoints', type=int, default=2048, help='resample points number')
parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_3.pth', help='model path')
@@ -27,9 +254,40 @@ print(opt)
if __name__ == '__main__':

    # Create dataset
    print('Create data set ..')

    dataset_folder = './data/raw/predict/'
    pointcloud_file = './pointclouds/0_pc.xyz'

    # Load, normalize and enrich the input cloud. A separate name is used for the
    # array so that the pointcloud module `pc` (needed for write_pointcloud below)
    # is not shadowed.
    pc_data = pc.read_pointcloud(pointcloud_file)
    pc_data = normalize_pointcloud(pc_data)
    pc_data = append_normal_angles(pc_data)

    # pc_data = StandardScaler().fit_transform(pc_data)

    recreate_folder(dataset_folder)

    # Add full point cloud to prediction folder.
    recreate_folder(dataset_folder + '0_0' + '/')
    pc_fps = farthest_point_sampling(pc_data, opt.npoints)
    pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)

    pc_clusters = extract_cube_clusters(pc_data, [4, 4, 4], 1024)
    # pc_clusters = extract_clusters(pc_data, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')

    # Add cluster point clouds to prediction folder.
    for idx, pcc in enumerate(pc_clusters):
        pcc = farthest_point_sampling(pcc, opt.npoints)
        recreate_folder(dataset_folder + str(idx) + '/')
        pc.write_pointcloud(dataset_folder + str(idx) + '/pc.xyz', pcc)
        # draw_sample_data(pcc, False)

    draw_clusters(pc_clusters)
    # Load dataset
    print('Construct dataset ..')
    test_transform = GT.Compose([GT.NormalizeScale(), ])

    print('load dataset ..')
    test_transform = GT.Compose([GT.NormalizeScale(), ])

    test_dataset = ShapeNetPartSegDataset(
        mode='predict',
@@ -57,34 +315,33 @@ if __name__ == '__main__':
    net = net.to(device, dtype)
    net.eval()
##
    def eval_sample(net, sample):
        '''
        sample: { 'points': tensor(n, 3), 'labels': tensor(n,) }
        return: (pred_label, gt_label) with labels shape (n,)
        '''
        net.eval()
        with torch.no_grad():
            # points: (n, 3)
            points, gt_label = sample['points'], sample['labels']
            n = points.shape[0]

            points = points.view(1, n, 3)  # make a batch
            points = points.transpose(1, 2).contiguous()
            points = points.to(device, dtype)

            pred = net(points)  # (batch_size, n, num_classes)
            pred_label = pred.max(2)[1]
            pred_label = pred_label.view(-1).cpu()  # (n,)

            assert pred_label.shape == gt_label.shape
            return (pred_label, gt_label)

    result_clusters = []

    # Iterate over all the samples
    for sample in test_dataset:
        print('Eval test sample ..')
        pred_label, gt_label = eval_sample(net, sample)
        print('Eval done ..')
        sample_data = np.column_stack((sample["points"].numpy(), sample["normals"].numpy(), pred_label.numpy()))
        print('Eval done.')
        pred_labels = pred_label.numpy()
        print(pred_labels)

        sample_data = normalize_pointcloud(sample_data)
        sample_data = append_onehotencoded_type(sample_data, 1.0)
        sample_data = append_normal_angles(sample_data)

        print('Clustering ..')
        print('Shape: ' + str(sample_data.shape))

        clusters = extract_clusters(sample_data, [0, 1, 2, 3, 4, 5, 7, 8, 9, 10], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
        print('Clustering done. ' + str(len(clusters)) + " Clusters.")
        print(sample_data[:, 6])

        draw_sample_data(sample_data, False)

        result_clusters.extend(clusters)
        # result_clusters.append(sample_data)

    # draw_clusters(result_clusters)

View File

@@ -1,4 +1,4 @@
import open3d as o3d
# import open3d as o3d
import numpy as np