From 49b373a8a1d0470ad94737efc358404f650086ea Mon Sep 17 00:00:00 2001
From: Si11ium
Date: Fri, 19 Jun 2020 13:35:37 +0200
Subject: [PATCH] explicit model argument

---
 datasets/shapenet.py  | 6 ++++--
 models/point_net_2.py | 6 ++++--
 utils/pointcloud.py   | 6 +++---
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/datasets/shapenet.py b/datasets/shapenet.py
index 8917d79..eb7ed55 100644
--- a/datasets/shapenet.py
+++ b/datasets/shapenet.py
@@ -1,4 +1,5 @@
 from pathlib import Path
+from warnings import warn
 
 import numpy as np
 
@@ -55,13 +56,14 @@ class CustomShapeNet(InMemoryDataset):
     def processed_file_names(self):
         return [f'{self.mode}.pt']
 
-    def download(self):
+    def __download(self):
         dir_count = len([name for name in os.listdir(self.raw_dir) if os.path.isdir(os.path.join(self.raw_dir, name))])
 
         if dir_count:
             print(f'{dir_count} folders have been found....')
             return dir_count
-        raise IOError("No raw pointclouds have been found.")
+        warn(ResourceWarning("No raw pointclouds have been found. Was this intentional?"))
+        return dir_count
 
     @property
     def num_classes(self):
diff --git a/models/point_net_2.py b/models/point_net_2.py
index aad346c..a8621bf 100644
--- a/models/point_net_2.py
+++ b/models/point_net_2.py
@@ -23,8 +23,10 @@ class PointNet2(BaseValMixin,
 
         # Dataset
         # =============================================================================
-        self.dataset = self.build_dataset(ShapeNetPartSegDataset, collate_per_segment=True,
-                                          npoints=self.params.npoints)
+        self.dataset = self.build_dataset(ShapeNetPartSegDataset,
+                                          collate_per_segment=True,
+                                          npoints=self.params.npoints
+                                          )
 
         # Model Paramters
         # =============================================================================
diff --git a/utils/pointcloud.py b/utils/pointcloud.py
index 8e28b4f..d1c272c 100644
--- a/utils/pointcloud.py
+++ b/utils/pointcloud.py
@@ -110,7 +110,7 @@ def cluster_per_column(pc, column):
 
 
 def cluster_cubes(data, cluster_dims, max_points_per_cluster=-1, min_points_per_cluster=-1):
-    if cluster_dims[0] is 1 and cluster_dims[1] is 1 and cluster_dims[2] is 1:
+    if cluster_dims[0] == 1 and cluster_dims[1] == 1 and cluster_dims[2] == 1:
         print("no need to cluster.")
         return [farthest_point_sampling(data, max_points_per_cluster)]
 
@@ -141,7 +141,7 @@ def cluster_cubes(data, cluster_dims, max_points_per_cluster=-1, min_points_per_
     final_clusters = []
     for key, cluster in clusters.items():
         c = np.vstack(cluster)
-        if c.shape[0] < min_points_per_cluster and -1 is not min_points_per_cluster:
+        if c.shape[0] < min_points_per_cluster and -1 != min_points_per_cluster:
             continue
 
         if max_points_per_cluster is not -1:
@@ -171,7 +171,7 @@ def cluster_dbscan(data, selected_indices, eps, min_samples=5, metric='euclidean
     clusters = {}
 
     for idx, l in enumerate(labels):
-        if l is -1:
+        if l == -1:
             continue
 
         clusters.setdefault(str(l), []).append(data[idx, :])