-
Notifications
You must be signed in to change notification settings - Fork 10
Expand file tree
/
Copy pathcompute_features.py
More file actions
124 lines (83 loc) · 3.34 KB
/
compute_features.py
File metadata and controls
124 lines (83 loc) · 3.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Efficient implementation of features computation.
"""
#%% Imports
# Standard library
import pickle
import sys
import time
from os.path import exists

# Third-party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split

# Project-local
from features_computation.descriptors import local_PCA, compute_features
from utils.ply import write_ply, read_ply
#%% Training data for the ground classifier
# Path of the training .ply cloud, given on the command line.
training_path = sys.argv[1]
# training_path = 'data_points_cloud/training/MiniLille1.ply'
cloud_ply = read_ply(training_path)
# Stack the coordinate fields into an (N, 3) array, one row per point.
points = np.vstack((cloud_ply['x'], cloud_ply['y'], cloud_ply['z'])).T
# NOTE: training clouds are large (~2M points), hence the chunking below.
print("Points shape:", points.shape)
#%% Compute SVD on the point cloud
# Eigendecomposition of the cloud's covariance matrix: the columns of
# `eigenvectors` are the principal axes, ordered by ascending eigenvalue
# (np.linalg.eigh convention), so column 2 is the largest-variance axis.
n, d = points.shape
barycenter = points.mean(axis=0)
# Centre the cloud before forming the (biased, 1/n) covariance estimate.
Q = points - barycenter
cov_mat = (1/n)*Q.T@Q
assert cov_mat.shape == (d, d)
eigenvalues, eigenvectors = np.linalg.eigh(cov_mat)
#%% Bucket the cloud along its principal axis
bucket_size = 20
bucket_residual = 3
# Projection of each centred point on the principal axis (largest-variance
# eigenvector — eigh returns ascending eigenvalues, so it is column 2).
scalar_product = (points-barycenter)@eigenvectors[:, 2]
# Integer bucket id of every point along that axis (floor division).
hash_index = scalar_product//bucket_size
chunck_ids = np.unique(hash_index)

# Visual sanity check on one chunk: dump its points, and the points of the
# chunk enlarged by `bucket_residual` on each side, to .ply files.
selected_index = 0
upper_bound = (selected_index+1)*bucket_size
lower_bound = selected_index*bucket_size
interest_indexes = np.where(hash_index==selected_index)[0]
fuzzy_indexes = np.where((scalar_product <= upper_bound+bucket_residual)*\
                         (scalar_product >= lower_bound-bucket_residual))[0]
# Store points with such voxelization; the constant 'color' field (10 vs 0)
# lets the two dumps be told apart in a viewer.
write_ply('./data_processing/interest_points.ply',
          [points[interest_indexes], np.ones(interest_indexes.shape)*10],
          ['x', 'y', 'z', 'color'])
write_ply('./data_processing/interest_and_boundary_points.ply',
          [points[fuzzy_indexes], np.ones(fuzzy_indexes.shape)*0],
          ['x', 'y', 'z', 'color'])
#%% Iterate for features computations
# For each chunk: compute the 4 local descriptors (verticality, linearity,
# planarity, sphericity) of its points, using the chunk enlarged by
# `bucket_residual` as neighbourhood support so border points still see
# their true neighbours within `radius`.
radius = 0.5
features = np.empty((0, 4))
features_index = []
feature_file = ('features/training/'
                + training_path.split('/')[-1].split('.')[0] + '_features.npy')
print(feature_file)
for ii, selected_index in enumerate(chunck_ids, start=1):
    print('Compute features on chunck: {}/{}'.format(ii, len(chunck_ids)))
    upper_bound = (selected_index+1)*bucket_size
    lower_bound = selected_index*bucket_size
    # Points strictly inside the chunk: features are computed for these only.
    interest_indexes = np.where(hash_index==selected_index)[0]
    # NOTE(review): upper bound uses `<` here but `<=` in the demo cell
    # above — presumably half-open buckets are intended; confirm.
    fuzzy_indexes = np.where((scalar_product < upper_bound+bucket_residual)*\
                             (scalar_product >= lower_bound-bucket_residual))[0]
    print(fuzzy_indexes.shape)
    vert, line, plan, sphe = compute_features(points[interest_indexes],
                                              points[fuzzy_indexes], radius)
    # One row per interest point, columns (vert, line, plan, sphe).
    slice_features = np.vstack((vert.ravel(), line.ravel(), plan.ravel(), sphe.ravel())).T
    features_index += list(interest_indexes)
    features = np.vstack((features, slice_features))
# Every point of the cloud must have received exactly one feature row.
assert features.shape[0]==points.shape[0]
# sort on index to preserve initial order of points
df = pd.DataFrame(data=features, index=features_index)
# DataFrame.as_matrix() was removed in pandas 1.0 — to_numpy() is the
# supported equivalent.
features = df.sort_index().to_numpy()
np.save(feature_file, features)