added type detection pipeline
main_pipeline.py | 94 (Normal file)

@@ -0,0 +1,94 @@
from pathlib import Path

import torch
from torch_geometric.data import Data
from tqdm import tqdm

import polyscope as ps
import numpy as np

from torch.utils.data import DataLoader

# Dataset and Dataloaders
# =============================================================================

# Transforms
from ml_lib.point_toolset.point_io import BatchToData
from ml_lib.utils.model_io import SavedLightningModels


# Datasets
from datasets.full_pointclouds import FullCloudsDataset
from utils.pointcloud import read_pointcloud, normalize_pointcloud, cluster_cubes, append_onehotencoded_type, \
    label2color, polytopes_to_planes
from utils.project_config import GlobalVar, ThisConfig

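# Note: prepare_dataloader below assembles a test-split DataLoader over FullCloudsDataset;
# it is not called from the __main__ block in this file, which runs inference on a single
# raw point cloud instead.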
def prepare_dataloader(config_obj):
    dataset = FullCloudsDataset(config_obj.data.root, split=GlobalVar.data_split.test,
                                setting=GlobalVar.settings[config_obj.model.type])
    # noinspection PyTypeChecker
    return DataLoader(dataset, batch_size=config_obj.train.batch_size,
                      num_workers=config_obj.data.worker, shuffle=False)

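# Loads the saved Lightning checkpoint (n=-1, presumably the latest) from the given
# model directory, restores the model and moves it to the GPU when one is available.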
def restore_logger_and_model(log_dir):
    model = SavedLightningModels.load_checkpoint(models_root_path=log_dir, n=-1)
    model = model.restore()
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    return model

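# Runs the type-detection model on one cluster. Columns 0:3 of input_pc are taken as
# point coordinates and columns 3:6 as per-point features (presumably normals); the
# argmax over the network's main_out yields a per-point primitive-type label, which is
# appended to the cloud as an extra column.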
def predict_prim_type(input_pc, model):

    input_data = (
        torch.tensor(np.array([input_pc[:, 3:6]], np.float64)),
        torch.tensor(input_pc[:, 0:3]),
        np.zeros(input_pc.shape[0])
    )

    batch_to_data = BatchToData()

    data = batch_to_data(input_data[0], input_data[1], input_data[2])
    y = model(data.to(device='cuda' if torch.cuda.is_available() else 'cpu'))
    y_primary = torch.argmax(y.main_out, dim=-1).squeeze().cpu().numpy()

    return np.concatenate((input_pc, y_primary.reshape(-1, 1)), axis=1)

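# Inference entry point: load the config and trained model, read and normalize the input
# cloud, partition it into grid clusters via cluster_cubes (here a 1x1x1 grid with a
# 2048-point budget; exact semantics per utils.pointcloud), predict per-point primitive
# types for each cluster, and visualize the labelled clusters with polyscope.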
if __name__ == '__main__':

    input_pc_path = 'data/pc/pc.txt'

    model_path = Path('trained_models/version_1')
    config_filename = 'config.ini'
    config = ThisConfig()
    config.read_file((Path(model_path) / config_filename).open('r'))
    loaded_model = restore_logger_and_model(model_path)
    loaded_model.eval()

    input_pc = read_pointcloud(input_pc_path, ' ', False)

    input_pc = normalize_pointcloud(input_pc)

    grid_clusters = cluster_cubes(input_pc, [1, 1, 1], 2048)

    ps.init()

    for i, grid_cluster_pc in enumerate(grid_clusters):

        print("Cluster pointcloud size: {}".format(grid_cluster_pc.shape[0]))

        pc_with_prim_type = predict_prim_type(grid_cluster_pc, loaded_model)

        # pc_with_prim_type = polytopes_to_planes(pc_with_prim_type)

        pc_with_prim_type = append_onehotencoded_type(pc_with_prim_type)

        pc = ps.register_point_cloud("points_" + str(i), pc_with_prim_type[:, :3], radius=0.01)
        pc.add_color_quantity("prim types", label2color(pc_with_prim_type[:, 6].astype(np.int64)), True)

    ps.show()

    print('Done')