pointnet2 working - TODO: Eval!
@@ -1,17 +1,27 @@
import pickle
from collections import defaultdict

from abc import ABC
from pathlib import Path

from torch.utils.data import Dataset

from ml_lib.point_toolset.sampling import FarthestpointSampling

import numpy as np


class _Point_Dataset(ABC, Dataset):

    @property
    def sample_shape(self):
        # FixMe: This does not work when more than x/y tuples are returned
        return self[0][0].shape

    @property
    def setting(self) -> str:
        raise NotImplementedError

-    headers = ['x', 'y', 'z', 'nx', 'ny', 'nz', 'label', 'cl_idx']
+    headers = ['x', 'y', 'z', 'xn', 'yn', 'zn', 'label', 'cl_idx']

    def __init__(self, root=Path('data'), sampling_k=2048, transforms=None, load_preprocessed=True, *args, **kwargs):
        super(_Point_Dataset, self).__init__()
@@ -21,13 +31,32 @@ class _Point_Dataset(ABC, Dataset):
        self.sampling_k = sampling_k
        self.sampling = FarthestpointSampling(K=self.sampling_k)
        self.root = Path(root)
-        self.raw = root / 'raw'
+        self.raw = self.root / 'raw'
        self.processed_ext = '.pik'
        self.raw_ext = '.xyz'
-        self.processed = root / self.setting
+        self.processed = self.root / self.setting
        self.processed.mkdir(parents=True, exist_ok=True)

        self._files = list(self.raw.glob(f'*{self.setting}*'))

    def _read_or_load(self, item):
        raw_file_path = self._files[item]
        processed_file_path = self.processed / raw_file_path.name.replace(self.raw_ext, self.processed_ext)

        if not self.load_preprocessed:
            processed_file_path.unlink(missing_ok=True)
        if not processed_file_path.exists():
            pointcloud = defaultdict(list)
            with raw_file_path.open('r') as raw_file:
                for row in raw_file:
                    values = [float(x) for x in row.strip().split(' ')]
                    for header, value in zip(self.headers, values):
                        pointcloud[header].append(value)
            for key in pointcloud.keys():
                pointcloud[key] = np.asarray(pointcloud[key])
            with processed_file_path.open('wb') as processed_file:
                pickle.dump(pointcloud, processed_file)
        return processed_file_path

    def __len__(self):
        raise NotImplementedError

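For reference, the cache written by _read_or_load maps each header to one 1-D array per column. A standalone sketch of that parsing step, with two hypothetical inline rows standing in for a raw .xyz file:

import pickle
from collections import defaultdict
import numpy as np

headers = ['x', 'y', 'z', 'xn', 'yn', 'zn', 'label', 'cl_idx']
raw_rows = ['0.1 0.2 0.3 0.0 0.0 1.0 2 0',   # x y z xn yn zn label cl_idx
            '0.4 0.5 0.6 0.0 1.0 0.0 2 0']

pointcloud = defaultdict(list)
for row in raw_rows:
    values = [float(x) for x in row.strip().split(' ')]
    for header, value in zip(headers, values):
        pointcloud[header].append(value)
for key in pointcloud.keys():
    pointcloud[key] = np.asarray(pointcloud[key])

print(pointcloud['x'])              # [0.1 0.4] -> one entry per point
payload = pickle.dumps(pointcloud)  # what lands in the .pik cache file
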
@@ -1,9 +1,7 @@
import pickle
-from collections import defaultdict
-from pathlib import Path

import numpy as np
from torch.utils.data import Dataset

from ._point_dataset import _Point_Dataset

@@ -19,27 +17,17 @@ class FullCloudsDataset(_Point_Dataset):
        return len(self._files)

    def __getitem__(self, item):
-        raw_file_path = self._files[item]
-        processed_file_path = self.processed / raw_file_path.name.replace(self.raw_ext, self.processed_ext)
-        if not self.load_preprocessed:
-            processed_file_path.unlink(missing_ok=True)
-        if not processed_file_path.exists():
-            pointcloud = defaultdict(list)
-            with raw_file_path.open('r') as raw_file:
-                for row in raw_file:
-                    values = [float(x) for x in row.split(' ')]
-                    for header, value in zip(self.headers, values):
-                        pointcloud[header].append(value)
-            for key in pointcloud.keys():
-                pointcloud[key] = np.asarray(pointcloud[key])
-            with processed_file_path.open('wb') as processed_file:
-                pickle.dump(pointcloud, processed_file)
+        processed_file_path = self._read_or_load(item)

        with processed_file_path.open('rb') as processed_file:
            pointcloud = pickle.load(processed_file)
-        points = np.stack(pointcloud['x'], pointcloud['y'], pointcloud['z'])
-        normal = np.stack(pointcloud['xn'], pointcloud['yn'], pointcloud['zn'])
-        label = points['label']
-        samples = self.sampling(points)
+        points = np.stack((pointcloud['x'], pointcloud['y'], pointcloud['z'],
+                           pointcloud['xn'], pointcloud['yn'], pointcloud['zn']),
+                          axis=-1)
+        # When you want to return points and normals separately:
+        # normal = np.stack((pointcloud['xn'], pointcloud['yn'], pointcloud['zn']), axis=-1)
+        label = pointcloud['label']
+        sample_idxs = self.sampling(points)

-        return points[samples], normal[samples], label[samples]
+        return points[sample_idxs].astype(float), label[sample_idxs].astype(int)

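Note what the removed lines got wrong: np.stack takes a single sequence of arrays, so passing them positionally made the second array the axis argument. A quick shape check of the new layout, with dummy data:

import numpy as np

cloud = {h: np.zeros(5) for h in ('x', 'y', 'z', 'xn', 'yn', 'zn')}  # 5 dummy points
points = np.stack((cloud['x'], cloud['y'], cloud['z'],
                   cloud['xn'], cloud['yn'], cloud['zn']), axis=-1)
print(points.shape)                # (5, 6): one row per point, xyz + normals
sample_idxs = np.array([0, 2, 4])  # stand-in for FarthestpointSampling output
print(points[sample_idxs].shape)   # (3, 6): row indexing keeps all 6 columns
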
@@ -1,6 +1,32 @@
from torch.utils.data import Dataset
import pickle
import numpy as np

from ._point_dataset import _Point_Dataset


-class TemplateDataset(_Point_Dataset):
+class FullCloudsDataset(_Point_Dataset):

    setting = 'grid'

    def __init__(self, *args, **kwargs):
-        super(TemplateDataset, self).__init__()
+        super(FullCloudsDataset, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._files)

    def __getitem__(self, item):
        processed_file_path = self._read_or_load(item)

        with processed_file_path.open('rb') as processed_file:
            pointcloud = pickle.load(processed_file)
        points = np.stack((pointcloud['x'], pointcloud['y'], pointcloud['z'],
                           pointcloud['xn'], pointcloud['yn'], pointcloud['zn']),
                          axis=-1)

        # When you want to return points and normals separately:
        # normal = np.stack((pointcloud['xn'], pointcloud['yn'], pointcloud['zn']), axis=-1)
        label = np.stack((pointcloud['label'], pointcloud['cl_idx']), axis=-1)  # (N, 2)
        sample_idxs = self.sampling(points)

        return points[sample_idxs], label[sample_idxs]
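The label stack above needs axis=-1 so the same sample_idxs can index points and labels alike; with the default axis=0 the result would be (2, N) and point-index fancy indexing would no longer line up. A quick shape check with dummy arrays:

import numpy as np

label = np.arange(5, dtype=float)            # per-point class labels
cl_idx = np.zeros(5)                         # per-point cluster indices
stacked = np.stack((label, cl_idx), axis=-1)
print(stacked.shape)                         # (5, 2): rows align with points
sample_idxs = np.array([1, 3])
print(stacked[sample_idxs].shape)            # (2, 2): same row indexing as points
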
@@ -1,8 +1,32 @@
from torch.utils.data import Dataset
import pickle
import numpy as np

from ._point_dataset import _Point_Dataset


-class TemplateDataset(_Point_Dataset):
+class FullCloudsDataset(_Point_Dataset):

    setting = 'prim'

    def __init__(self, *args, **kwargs):
-        super(TemplateDataset, self).__init__()
+        super(FullCloudsDataset, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._files)

    def __getitem__(self, item):
        processed_file_path = self._read_or_load(item)

        with processed_file_path.open('rb') as processed_file:
            pointcloud = pickle.load(processed_file)
        points = np.stack((pointcloud['x'], pointcloud['y'], pointcloud['z'],
                           pointcloud['xn'], pointcloud['yn'], pointcloud['zn']),
                          axis=-1)

        # When you want to return points and normals separately:
        # normal = np.stack((pointcloud['xn'], pointcloud['yn'], pointcloud['zn']), axis=-1)
        label = np.stack((pointcloud['label'], pointcloud['cl_idx']), axis=-1)  # (N, 2)
        sample_idxs = self.sampling(points)

        return points[sample_idxs], label[sample_idxs]
@@ -10,4 +10,3 @@ class TemplateDataset(_Point_Dataset):

    def __getitem__(self, item):
        return item
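FarthestpointSampling comes from ml_lib and is not part of this commit. For intuition, a minimal NumPy sketch of the usual greedy farthest-point scheme (an assumption for illustration, not the library's implementation):

import numpy as np

def farthest_point_indices(xyz: np.ndarray, k: int) -> np.ndarray:
    """Greedily pick k indices so each new point is farthest from those chosen."""
    n = xyz.shape[0]
    chosen = np.zeros(k, dtype=np.int64)
    min_dist = np.full(n, np.inf)  # distance to the nearest chosen point so far
    farthest = 0                   # arbitrary seed point
    for i in range(k):
        chosen[i] = farthest
        d = np.sum((xyz - xyz[farthest]) ** 2, axis=1)
        min_dist = np.minimum(min_dist, d)
        farthest = int(np.argmax(min_dist))
    return chosen

print(farthest_point_indices(np.random.rand(100, 3), k=8))  # 8 spread-out indices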