# -*- coding: utf-8 -*-
"""
Classes and functions for registering and displaying mosaic lidar scans.
In this module we take an object-oriented approach to managing single scans,
projects (collections of scans, usually all from the same day or two),
tiepoint lists, and scan areas (collections of projects covering the same
physical regions). Each of these categories is represented by a class.
Created on Tue Sep 8 10:46:27 2020
@author: d34763s
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from scipy import ndimage
from scipy.spatial import Delaunay, cKDTree, KDTree
from scipy.special import erf, erfinv
from scipy.signal import fftconvolve
from scipy.stats import mode
from numpy.linalg import svd
import scipy.sparse as sp
from scipy.optimize import minimize, minimize_scalar
import pandas as pd
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import os
import sys
import re
import copy
import json
import math
import warnings
import time
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
try:
import pyximport
pyximport.install(inplace=True, language_level=3)
except ImportError:
print('Failed to build cython_util, is the C/C++ compiler installed?')
try:
import cython_util
except ModuleNotFoundError:
print('cython_util was not imported, functions relying on it will fail')
try:
import pdal
except ModuleNotFoundError:
print('pdal not imported, functions relying on it will fail')
try:
import open3d as o3d
except ModuleNotFoundError:
print('open3d not imported, functions relying on it will fail')
try:
import cv2 as cv
except ModuleNotFoundError:
print('opencv was not imported, functions relying on it will fail')
try:
import torch
import gpytorch
except ModuleNotFoundError:
print('torch and gpytorch were not imported')
try:
import utils_find_1st as utf1st
except ModuleNotFoundError:
print('utf1st not loaded, local max finding will not work')
class TiePointList:
"""Class contains tiepointlist object and methods.
...
Attributes
----------
project_name : str
Filename of the RiSCAN project the tiepointlist comes from
project_path : str
Directory location of the project.
tiepoints : Pandas dataframe
List of tiepoint names and coordinates in project coordinate system.
tiepoints_transformed : Pandas dataframe
List of tiepoints transformed into a new coordinate system.
current_transform : tuple
Tuple with index of current transform (the one used to create
tiepoints_transformed).
pwdist : Pandas dataframe
List of unique pairwise distances between reflectors.
dict_compare : dict
Stores comparisons of self pwdist with other tiepointlists' pwdists.
transforms : dict
Stores transformations that align self tiepoints with others. Keyed
on tuples (name, str_reflector_list). Each entry is a tuple
(reflector_list, transform, std).
raw_history_dict : dict
A dict containing the history of modification dependencies as a tree
see SingleScan docstring for more details
transformed_history_dict : dict
Same as raw history dict for tiepoints_transformed
trans_history_dict : dict
For each transformation in transforms, gives the corresponding node of
the history tree, keyed the same as in transforms.
Methods
-------
add_transform(name, transform, reflector_list=[], std=np.NaN)
Adds a transform to the transforms dataframe.
get_transform(index)
Returns the requested numpy array.
apply_transform(index)
Applies a transform in transforms to update tiepoints_transformed.
calc_pairwise_dist()
Calculates the distances between each unique pair of reflectors.
compare_pairwise_dist(other_tiepointlist)
Compares pwdist with other_tiepointlist's, stores result in dict_compare
calc_transformation(other_tiepointlist, reflector_list, mode='LS')
Calculates best fitting rigid transformation to align with other.
plot_map(other_project_name, delaunay=False, mode='dist')
Plots a map of the change in reflector distances.
"""
def __init__(self, project_path, project_name):
"""Stores the project_path and project_name variables and loads the
tiepoints into a pandas dataframe"""
self.project_path = project_path
self.project_name = project_name
try:
self.tiepoints = pd.read_csv(os.path.join(project_path, project_name,
'tiepoints.csv'),
index_col=0, usecols=[0,1,2,3])
self.tiepoints.sort_index(inplace=True)
except FileNotFoundError:
self.tiepoints = pd.DataFrame()
warnings.warn('No tiepoints found for '
+ os.path.join(project_path, project_name))
# Ignore any tiepoints that start with t
for label in self.tiepoints.index:
if label[0]=='t':
self.tiepoints.drop(label, inplace=True)
# Add the identity matrix to the transform list
self.transforms = {('identity', '') : ([], np.eye(4), np.NaN)}
# Create the tiepoints_transformed dataframe
self.tiepoints_transformed = self.tiepoints.copy(deep=True)
self.current_transform = ('identity', '')
# Create and update history dicts
git_hash = get_git_hash()
self.raw_history_dict = {
"type": "Pointset Source",
"git_hash": git_hash,
"method": "TiePointList.__init__",
"filename": os.path.join(project_path, project_name,
'tiepoints.csv')
}
self.trans_history_dict = {}
self.trans_history_dict[('identity', '')] = {
"type": "Transform Source",
"git_hash": git_hash,
"method": "TiePointList.__init__",
"filename": ''
}
self.transformed_history_dict = {
"type": "Transformer",
"git_hash": git_hash,
"method": "TiePointList.__init__",
"input_0": self.raw_history_dict,
"input_1": self.trans_history_dict[('identity', '')]
}
def add_transform(self, name, transform, reflector_list=[], std=np.NaN,
history_dict=None):
"""
Add a transform to the transforms dict.
Parameters
----------
name : str
The name of the transformation to put in self.transforms
transform : 4x4 ndarray
The affine transformation in homogeneous coordinates.
reflector_list : list, optional
List of reflectors used to find this transformation (if any).
The default is an empty list.
std : float, optional
Standard deviation of residuals between aligned reflectors in m
if transformation is from reflectors. The default is np.NaN.
history_dict : dict
dict tree containing history of transform. If None then we create
a Transform Source node with the matrix as a param. The default
is None.
Returns
-------
key : tuple
The tuple under which the transform is keyed (indexed) in self.transforms.
"""
# Index into the transforms dict
str_reflector_list = ', '.join(reflector_list)
self.transforms[(name, str_reflector_list)] = (reflector_list,
transform,
std)
if history_dict is None:
warnings.warn('You are adding a transform with no history' +
' make sure this is intended')
self.trans_history_dict[(name, str_reflector_list)] = {
"type": "Transform Source",
"git_hash": get_git_hash(),
"method": "SingleScan.add_transform",
"filename": '',
"params": {"transform": transform.tolist()}
}
else:
self.trans_history_dict[(name, str_reflector_list)] = history_dict
# Return key (index) of transform
return (name, str_reflector_list)
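# For example (name, matrix, and reflector names below are hypothetical), a
# transform computed from three reflectors is keyed on the name paired with
# the comma-joined reflector list:
# key = tpl.add_transform('mosaic_rov_040120.RiSCAN_LS', A,
#                         reflector_list=['r01', 'r02', 'r03'], std=0.012,
#                         history_dict=history_dict)
# key == ('mosaic_rov_040120.RiSCAN_LS', 'r01, r02, r03')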
def get_transform(self, index, history_dict=False):
"""
Return the requested transform's array.
Parameters
----------
index : tuple
Key for desired transform in self.transforms.
history_dict : bool, optional
If True, also return the transform's history_dict. The default is
False.
Returns
-------
ndarray or (ndarray, dict)
ndarray is 4x4 matrix of the requested transform. dict is the
transform's history_dict
"""
if history_dict:
return (self.transforms[index][1], json.loads(json.dumps(
self.trans_history_dict[index])))
else:
warnings.warn("You are getting a transform without its history")
return self.transforms[index][1]
def apply_transform(self, index):
"""
Apply the transform in transforms to update tiepoints_transformed
Parameters
----------
index : tuple
Index of the transform in self.transforms to apply
Returns
-------
None.
"""
# extract positions in homogeneous coordinates
x_vec = np.ones((4, self.tiepoints.shape[0]))
x_vec[:-1, :] = self.tiepoints.to_numpy().T
# apply transformation
y_vec = np.matmul(self.transforms[index][1], x_vec)
# replace values in self.tiepoints_transformed
self.tiepoints_transformed[:] = y_vec[:3, :].T
# Update current_transform
self.current_transform = index
# Update history dict
self.transformed_history_dict["input_1"] = self.trans_history_dict[
index]
def calc_pairwise_dist(self):
"""Calculate the pairwise distances between each unique pair of
reflectors"""
# Use list of dictionaries approach: https://stackoverflow.com/questions/10715965/add-one-row-to-pandas-dataframe
rows_list = []
for indexA, rowA in self.tiepoints.iterrows():
for indexB, rowB in self.tiepoints.iterrows():
if (indexA >= indexB):
continue
else:
dict1 = {}
dict1.update({'rA': indexA, 'rB': indexB,
'dist': np.sqrt(
(rowA[0] - rowB[0])**2 +
(rowA[1] - rowB[1])**2 +
(rowA[2] - rowB[2])**2)})
rows_list.append(dict1)
self.pwdist = pd.DataFrame(rows_list)
self.pwdist.set_index(['rA', 'rB'], inplace=True)
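# The resulting pwdist dataframe has one row per unique reflector pair and a
# MultiIndex (rA, rB) with rA < rB, e.g. (reflector names and distances are
# hypothetical):
#                  dist
# rA    rB
# r01   r02     152.473
# r01   r03      98.441
# r02   r03     121.006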
def compare_pairwise_dist(self, other_tiepointlist):
"""
Compares pairwise distances of TiePointList with the other one and
stores results in a dictionary whose key is the name of the other.
Note, our typical usage will be to compare the current tiepointlist
with one from a week prior. Thus the difference we calculate is this
list's distance minus the other distance.
Parameters
----------
other_tiepointlist : TiePointList
The TiePointList to compare to. Its distances appear as dist_y in
the merged dataframe.
Returns
-------
None. Results are stored in self.dict_compare, keyed on the other's
project_name.
"""
# Create Dictionary if it doesn't exist
if not hasattr(self, 'dict_compare'):
self.dict_compare = {}
# Now Calculate pairwise distances and store
if not hasattr(self, 'pwdist'):
self.calc_pairwise_dist()
if not hasattr(other_tiepointlist, 'pwdist'):
other_tiepointlist.calc_pairwise_dist()
df = pd.merge(self.pwdist, other_tiepointlist.pwdist, how='inner',
left_index=True, right_index=True)
df['diff'] = df['dist_x'] - df['dist_y']
df['diff_abs'] = abs(df['diff'])
df['strain'] = df['diff']/df['dist_y']
df.sort_values(by='diff_abs', inplace=True)
self.dict_compare.update({other_tiepointlist.project_name: df})
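# The stored dataframe merges the two pairwise-distance tables on their
# (rA, rB) index: dist_x is this tiepointlist's distance, dist_y is the
# other's, diff = dist_x - dist_y, diff_abs = |diff|, and
# strain = diff / dist_y. Rows are sorted by diff_abs in ascending order.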
def calc_transformation(self, other_tiepointlist, reflector_list,
mode='LS', use_tiepoints_transformed=True,
yaw_angle=0):
"""
Calculate the rigid transformation to align with other tiepointlist.
See info under mode. In either mode, we start by computing the
centroids of the selected reflectors in both lists and create arrays
of position relative to centroid. Then we use the singular value
decomposition to find the rotation matrix (either in 3 dimensions or
1 depending on mode) that best aligns the reflectors. Finally, we
solve for the appropriate translation based on the centroids. We store
the result as a 4x4 matrix in self.transforms.
The method used here is based on Arun et al. (1987); a standalone
sketch of the procedure is given after this class definition:
https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4767965
Parameters
----------
other_tiepointlist : TiePointList
TiePointList to compare with.
reflector_list : list
List of tiepoints to align.
mode : str, optional
Mode of the transformation, in 'LS' transformation can rotate on
all 3 axes to find best fit (must have at least 3 reflectors). In
'Yaw' the z axis is fixed and we are only rotating around it (it
still translates in all 3 dimensions). If 'Trans', we only have 1
reflector and just translate. The default is 'LS'.
use_tiepoints_transformed : bool, optional
Whether to use the tiepoints_transformed from the other tiepoint
list or the raw ones. The default is True.
yaw_angle : float, optional
If the mode is 'Trans' this is the angle (in radians) by which to
change the heading of the scan. The default is 0.
Returns
-------
key : tuple
The tuple under which the transform is keyed (indexed) in self.transforms.
"""
# Create history_dict for this operation
history_dict = {
"type": "Transform Computer",
"git_hash": get_git_hash(),
"method": 'TiePointList.calc_transformation',
"input_1": json.loads(json.dumps(self.raw_history_dict)),
"params": {"reflector_list": reflector_list,
"mode": mode,
"yaw_angle": yaw_angle}
}
# extract point lists and name as in Arun et al.
if use_tiepoints_transformed:
psubi_prime = other_tiepointlist.tiepoints_transformed.loc[
reflector_list].to_numpy().T
history_dict["input_0"] = json.loads(json.dumps(
other_tiepointlist.transformed_history_dict))
else:
psubi_prime = other_tiepointlist.tiepoints.loc[
reflector_list].to_numpy().T
history_dict["input_0"] = json.loads(json.dumps(
other_tiepointlist.raw_history_dict))
psubi = self.tiepoints.loc[reflector_list].to_numpy().T
# Compute centroids
p_prime = psubi_prime.mean(axis=1).reshape((3,1))
p = psubi.mean(axis=1).reshape((3,1))
# Translate such that centroids are at zero
qsubi_prime = psubi_prime - p_prime
qsubi = psubi - p
# Compute best fitting rotation matrix R
if (mode=='LS'):
# Calculate the 3x3 matrix H (Using all 3 axes)
H = np.matmul(qsubi, qsubi_prime.T)
# Find its singular value decomposition
U, S, Vh = svd(H)
# Calculate X, the candidate rotation matrix
X = np.matmul(Vh.T, U.T)
# Check if the determinant of X is near 1; this should basically
# always be the case for our data
if np.isclose(1, np.linalg.det(X)):
R = X
elif np.isclose(-1, np.linalg.det(X)):
V_prime = np.array([Vh[0,:], Vh[1,:], -1*Vh[2,:]]).T
R = np.matmul(V_prime, U.T)
print(R)
elif (mode=='Yaw'):
# If we are locking the x-y plane we can modify the above process
# to just find a rotation in 2 dimensions
# Calculate the 2x2 matrix H
H = np.matmul(qsubi[:2,:], qsubi_prime[:2,:].T)
# Find its singular value decomposition
U, S, Vh = svd(H)
# Calculate X, the candidate rotation matrix
X = np.matmul(Vh.T, U.T)
# Check if the determinant of X is near 1; this should basically
# always be the case for our data
R = np.eye(3)
if np.isclose(1, np.linalg.det(X)):
R[:2, :2] = X
elif np.isclose(-1, np.linalg.det(X)):
V_prime = np.array([Vh[0,:], -1*Vh[1,:]]).T
R[:2, :2] = np.matmul(V_prime, U.T)
elif (mode=='Trans'):
# If we are just in translational mode then the rotation matrix is
# determined by the yaw angle.
R = np.eye(3)
R[0,0] = np.cos(yaw_angle)
R[0,1] = -1*np.sin(yaw_angle)
R[1,0] = np.sin(yaw_angle)
R[1,1] = np.cos(yaw_angle)
# Now find translation vector to align centroids
T = p_prime - np.matmul(R, p)
# Combine R and T into 4x4 matrix
A = np.eye(4)
A[:3, :3] = R
A[:3, 3] = T.squeeze()
# Compute standard deviation of euclidean distances.
if not (mode=='Trans'):
p_trans = np.matmul(R, psubi) + T
dist = np.sqrt((np.square(psubi_prime - p_trans)).sum(axis=0))
std = dist.std()
else:
std = np.NaN
# Add matrix to transforms, including its history dict
key = self.add_transform(other_tiepointlist.project_name + '_' + mode,
A, reflector_list, std=std,
history_dict=history_dict)
# Return key (index) of transform in self.transforms.
return key
def plot_map(self, other_project_name, delaunay=False, mode='dist',
use_tiepoints_transformed=False):
"""
Plot lines showing change in distances or strains between two scans.
Parameters
----------
other_project_name : str
Other project must already be in dict_compare.
delaunay : bool, optional
Whether to plot just the lines that are part of the Delaunay
triangulation. The default is False.
mode : str, optional
If 'dist' display differences as distances, if 'strain' display
differences as strains. The default is 'dist'.
use_tiepoints_transformed : bool, optional
If true plot tiepoints at locations given by tiepoints_transformed.
The default is False.
Returns
-------
Matplotlib figure and axes objects.
"""
# First let's just plot the reflectors and their names.
f, ax = plt.subplots(1, 1, figsize=(10, 10))
# Looping is inefficient but we have very few points.
# Let's limit to just plotting the reflectors present in both
tup = self.dict_compare[other_project_name].index.tolist()
as_arr = np.array(list(map(lambda x: x[0], tup)) +
list(map(lambda x: x[1], tup)))
common_reflectors = np.unique(as_arr)
if use_tiepoints_transformed:
for index, row in self.tiepoints_transformed.iterrows():
if (index in common_reflectors):
ax.scatter(row[0], row[1], s=10, c='k')
ax.text(row[0]+5, row[1]+5, s=index, fontsize=12)
else:
for index, row in self.tiepoints.iterrows():
if (index in common_reflectors):
ax.scatter(row[0], row[1], s=10, c='k')
ax.text(row[0]+5, row[1]+5, s=index, fontsize=12)
# If we are just plotting delaunay lines calculate delaunay triang.
if delaunay:
tri = Delaunay(self.tiepoints.loc[common_reflectors].
to_numpy()[:,:2])
# Now we want to create a list of tuples matching the multiindex
# in dict_compare
indptr, indices = tri.vertex_neighbor_vertices
delaunay_links = []
for i in range(common_reflectors.size): #range(self.tiepoints.shape[0]):
#r_start = self.tiepoints.index[i]
r_start = common_reflectors[i]
for neighbor in indices[indptr[i]:indptr[i+1]]:
#r_end = self.tiepoints.index[neighbor]
r_end = common_reflectors[neighbor]
# Preserve ordering style in multiindex
if (r_end > r_start):
delaunay_links.append((r_start, r_end))
# Create appropriate diverging color map for changes in distance
# We'll use RdBu for now and flip it so that Blue is contraction
if mode=='dist':
max_abs_diff = (self.dict_compare[other_project_name]
['diff_abs'].to_numpy().max())
elif mode=='strain':
max_abs_diff = (abs(self.dict_compare[other_project_name]
['strain'].to_numpy()).max())
norm_color = Normalize(vmin=-1*max_abs_diff, vmax=max_abs_diff)
# Now plot lines between pairs of reflectors
if use_tiepoints_transformed:
for index, row in self.dict_compare[other_project_name].iterrows():
if (delaunay and not (index in delaunay_links)):
continue
# Color indicates change, blue is shortening, red is lengthening
if mode=='dist':
c = cm.RdBu_r(row['diff']/max_abs_diff + .5)
elif mode=='strain':
c = cm.RdBu_r(row['strain']/max_abs_diff + .5)
ax.plot([self.tiepoints_transformed.loc[index[0],'X[m]'],
self.tiepoints_transformed.loc[index[1],'X[m]']],
[self.tiepoints_transformed.loc[index[0],'Y[m]'],
self.tiepoints_transformed.loc[index[1],'Y[m]']],
c=c)
if delaunay:
if mode=='dist':
ax.text((self.tiepoints_transformed.loc[index[0],'X[m]'] +
self.tiepoints_transformed.loc[index[1],'X[m]'])/2,
(self.tiepoints_transformed.loc[index[0],'Y[m]'] +
self.tiepoints_transformed.loc[index[1],'Y[m]'])/2,
s = format(row['diff'], '.2f'))
elif mode=='strain':
ax.text((self.tiepoints_transformed.loc[index[0],'X[m]'] +
self.tiepoints_transformed.loc[index[1],'X[m]'])/2,
(self.tiepoints_transformed.loc[index[0],'Y[m]'] +
self.tiepoints_transformed.loc[index[1],'Y[m]'])/2,
s = format(row['strain'], '.4f'))
else:
for index, row in self.dict_compare[other_project_name].iterrows():
if (delaunay and not (index in delaunay_links)):
continue
# Color indicates change, blue is shortening, red is lengthening
if mode=='dist':
c = cm.RdBu_r(row['diff']/max_abs_diff + .5)
elif mode=='strain':
c = cm.RdBu_r(row['strain']/max_abs_diff + .5)
ax.plot([self.tiepoints.loc[index[0],'X[m]'],
self.tiepoints.loc[index[1],'X[m]']],
[self.tiepoints.loc[index[0],'Y[m]'],
self.tiepoints.loc[index[1],'Y[m]']],
c=c)
if delaunay:
if mode=='dist':
ax.text((self.tiepoints.loc[index[0],'X[m]'] +
self.tiepoints.loc[index[1],'X[m]'])/2,
(self.tiepoints.loc[index[0],'Y[m]'] +
self.tiepoints.loc[index[1],'Y[m]'])/2,
s = format(row['diff'], '.3f'))
elif mode=='strain':
ax.text((self.tiepoints.loc[index[0],'X[m]'] +
self.tiepoints.loc[index[1],'X[m]'])/2,
(self.tiepoints.loc[index[0],'Y[m]'] +
self.tiepoints.loc[index[1],'Y[m]'])/2,
s = format(row['strain'], '.4f'))
# Add colorbar
f.colorbar(cm.ScalarMappable(norm=norm_color, cmap='RdBu_r'), ax=ax)
ax.axis('equal')
ax.set_title('Change from ' + other_project_name + ' to '
+ self.project_name)
return f, ax
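# The following standalone sketch (illustrative only; the function name is
# ours and it is not used elsewhere in this module) mirrors the 'LS' branch of
# TiePointList.calc_transformation above, i.e. the rigid alignment of
# Arun et al. (1987). Both inputs are assumed to be 3xN arrays whose columns
# are matching reflector positions.
def _rigid_alignment_sketch(psubi, psubi_prime):
    """Return the 4x4 matrix that aligns psubi (3xN) onto psubi_prime (3xN)."""
    # Centroids of each point set
    p = psubi.mean(axis=1).reshape((3, 1))
    p_prime = psubi_prime.mean(axis=1).reshape((3, 1))
    # Center both point sets on their centroids
    qsubi = psubi - p
    qsubi_prime = psubi_prime - p_prime
    # Cross-covariance matrix and its singular value decomposition
    H = np.matmul(qsubi, qsubi_prime.T)
    U, S, Vh = svd(H)
    R = np.matmul(Vh.T, U.T)
    # Guard against the reflection case (determinant of -1)
    if np.isclose(-1, np.linalg.det(R)):
        V_prime = np.array([Vh[0, :], Vh[1, :], -1*Vh[2, :]]).T
        R = np.matmul(V_prime, U.T)
    # Translation that aligns the centroids after rotation
    T = p_prime - np.matmul(R, p)
    # Assemble the 4x4 affine matrix in homogeneous coordinates
    A = np.eye(4)
    A[:3, :3] = R
    A[:3, 3] = T.squeeze()
    return A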
class SingleScan:
"""
Container for single lidar scan and methods for displaying it.
...
Attributes
----------
project_path : str
Path to folder containing all Riscan projects.
project_name : str
Name of Riscan project.
scan_name : str
Typically ScanPos0XX where XX is the scan number.
poly : int
Which polydata from the list to take.
transform_dict : dict
dict of vtkTransforms linked with this single scan.
transform : vtkTransform
pipelined, concatenated vtkTransform to apply to this scan.
transformFilter : vtkTransformPolyDataFilter
filter that applies the transform above
filterName : str
name of the current filter whose output is in currentFilter
currentFilter : vtkThresholdPoints
vtkThresholdPoints with all transformed points that haven't been
filtered out.
filteredPoints : vtkThresholdPoints
vtkThresholdPoints containing all points that have been filtered out.
filterDict : dict
dict of vtkFilter objects
mapper : vtkPolyDataMapper
vtk mapper object
actor : vtkActor
vtk actor object
polydata_raw : vtkPolyData
Raw data read in from Riscan, we will add arrays to PointData. This
polydata's PointData includes an array Classification. This is a uint8
array with the classification of points defined as in the LAS
specification from ASPRS:
https://www.asprs.org/wp-content/uploads/2019/07/LAS_1_4_r15.pdf
Plus additional categories defined here
0 : Created, Never Classified
1 : Unclassified
2 : Ground
6 : Building
7 : Low Point (Noise)
64: High Elevation (Classified by elevation_filter)
65: Snowflake (Classified by returnindex filter)
66: Reflectance (high reflectance points and neighborhoods if desired)
67: Pole
68: Human
69: Snowmobile
70: Road
71: Flag
72: Wire
73: Manually Removed (mostly the ship and logistics area)
dsa_raw : vtk.numpy_interface.dataset_adapter.Polydata
dataset adaptor object for interacting with polydata_raw
man_class : pandas dataframe
Dataframe containing information on manually classified points. The
dataframe is keyed on PointId and contains:
X, Y, Z: position of the point in the scanner's own coordinate
system
trans_XX: transformation matrix where the point was selected. To
simplify I/O we break into the 12 components 00-11. We don't
need 16 because the last row of a rigid transformation is
0 0 0 1.
Linearity, Planarity, Scattering, Verticality: the four
Demantke2011 dimensionalities in the reference frame
corresponding to that transformation matrix.
elev: The vertical position of the point in the given reference
frame
dist: The distance from the scanner
Amplitude: measures of the pulse quality
Classification: Manual classification (number)
The expected usage here is that the scan was originally loaded from a
LAS file and that the PointId field created on that original loading
is the same as the PointId's of the points we add to this dataframe.
Doing otherwise may cause duplicate points which could cause errors
later on.
raw_history_dict : dict
A dict containing the history of modification dependencies to the
SingleScan as a tree. Every node in the tree contains the following
attributes as strings: "type", "git_hash", "method". It then contains
0 (if it's a source), 1 (filters), or 2 geometric
inputs, as history_dicts of themselves. Then it contains an arbitrary
number of parameters. If the node has two input geometries, then the
output is considered to be the first geometry acting on the zeroth
(if some sense of ordering is required). There are two kinds of
geometric objects, pointsets (e.g. lidar point clouds, reflector lists)
and transforms.
Node examples:
"type": "Transformer"
"git_hash":
"method":
"input_0": pointset
"input_1": transform
the output of this is a pointset (input 0 transformed by input 1)
"type": "Transform Computer"
"git_hash":
"method":
"input_0": pointset
"input_1": pointset
"params": {...}
the output of this is a transform (aligning input 1 with input 0)
"type": "Transform Concatenator"
"git_hash":
"method":
"input_0": transform
"input_1": transform
"params": {...}
the output of this a transform, the result of transforming by
input_0 followed by input_1
"type": "Pointset Aggregator"
"git_hash":
"method":
"input_0": pointset
"input_1": pointset
the output of this is a pointset (input_0 and input_1 concatenated)
"type": "Filter"
"git_hash":
"method":
"input_0": pointset
"params": {...}
the output of this is a pointset that's a subset of input_0
"type": "Scalar Modifier"
"git_hash":
"method":
"name":
"input_0": pointset
"params": {...}
the output of this is a pointset with the same geometry as input_0
"type": "Pointset Source"
"git_hash":
"method":
"filename": str
"params": {...}
the output of this is pointset. Note, this is only appropriate for
the first time we import a pointset from RiSCAN, if we are loading
saved intermediate data we should also load its history_dict.
"type": "Transform Source"
"git_hash":
"method":
"filename": str
"params": {...}
the output of this is a transform. If filename is empty and params
is empty then identity transform is assumed.
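A concrete example of a complete tree is given at the end of this
Attributes section.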
transformed_history_dict : dict
Same structure as raw_history_dict. self.raw_history_dict should be
the "input_0" value.
NOTE: here input_0 is passed by reference so that if for example,
we add arrays to polydata_raw (via "Scalar_Modifier") that carries
through.
"type": "Transformer"
"git_hash":
"method":
"input_0": self.raw_history_dict
"input_1": (dict corresponding to the current transform,)
filt_history_dict : dict
Same structure as raw_history_dict. self.transformed_history_dict
should be the input_0 value:
"type": "Filter"
"git_hash":
"method":
"input_0": self.transformed_history_dict
"params": {...}
NOTE: here input_0 is passed by reference so that if for example,
we add arrays to polydata_raw (via "Scalar_Modifier") that carries
through.
trans_history_dict : dict
For each transformation in transform_dict, gives the corresponding node
of the history tree, keyed off the same key.
labels : pandas dataframe
dataframe containing category, subcategory, id, x, y, z for manually
labelled points (e.g. stakes). x, y, z coordinates are in the Scanner's
Own Coordinate System.
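As a concrete example of the history tree structure described above,
TiePointList.__init__ builds exactly this kind of tree for its tiepoints:
the tiepoints.csv import is recorded as a "Pointset Source" node, the
identity transform as a "Transform Source" node with an empty filename,
and tiepoints_transformed is described by a "Transformer" node that takes
those two nodes as "input_0" and "input_1" respectively.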
Methods
-------
write_scan()
Writes the scan to a file to save the filters
write_current_transform()
Writes the current transform and its history_dict to files.
read_transform()
Reads a transform from file and places it in current transforms.
load_man_class()
Load the manual classification dataframe
load_labels()
Load the labels dataframe
add_label(category, subcategory, id, x, y, z, transform="current")
Add a label to the labels dataframe
get_label_point(category, subcategory, id, transform="current")
Get the x, y, z for a given label.
get_labels()
Get the labels dataframe, includes columns for transformed coordinates
add_transform(key, matrix, history_dict=None)
add a transform to transform_dict
update_current_filter(class_list)
update the current filter object with a new class_list
add_sop()
load the appropriate SOP matrix into transform_dict
add_z_offset(z_offset, history_dict=None)
add a z_offset transformation to transform_dict
get_polydata(port=False, history_dict=False)
Returns the polydata object for the current settings of transforms
and filters.
apply_transforms(transform_list)
updates transform to be concatenation of transforms in transform list.
random_voxel_downsample_filter(wx, wy, wz=None, seed=1234)
Downsample point cloud with one random point per voxel.
clear_classification(category=None)
Reset all Classification values to 0.
apply_man_class()
For points that we have manually classified, set their classification.
update_man_class(pdata, classification)
Update the points in man_class with the points in pdata.
update_man_class_fields(update_fields='all', update_trans=True)
Update the man_class table with values from the fields currently in
polydata_raw. Useful, for example if we've improved the HAG filter and
don't want to have to repick all points.
create_normals(radius=2, max_nn=30)
Estimate point normals (using Open3D).
create_z_sigma()
For the current value of the transformation, project the pointwise
uncertainty spheroids onto the z-axis and save in PointData.
apply_elevation_filter(z_max)
Filter out all points above a certain height. Sets the flag in
Classification to 64.
apply_rmin_filter(buffer=0.05, count=150000)
Assign all points very close to the scanner as snowflakes.
apply_snowflake_filter_3(z_std_mult, leafsize):
Filter points as snowflakes based on whether their z value in the
transformed reference frame exceeds z_std_mult multiples of the mean
z values for points nearby (within a bucket of size, leafsize)
apply_snowflake_filter_returnindex(cylinder_rad, radial_precision)
Filter snowflakes based on their return index and whether they are on
the border of the visible region.
apply_manual_filter(corner_coords, normal=(0, 0, 1), category=73)
Classify points by a selection loop.
apply_cylinder_filter(x, y, r, category=73)
Classify all points within a horizontal distance r of a given point.
create_scanner_actor(color="Gray", length=150)
Create an actor for visualizing the scanner and its orientation.
create_labels_actors(color='White', row_index=None)
Create dataframe containing actors for each label
create_filter_pipeline(colors)
Create mapper and actor displaying points colored by Classification
create_solid_pipeline(color='Green')
Create vtk visualization pipeline with solid colors
create_elevation_pipeline(z_min, z_max, lower_threshold=-1000,
upper_threshold=1000, LOD=True,
cmap_name='rainbow')
create mapper and actor for displaying points with colors by elevation
create_reflectance()
Create reflectance field in polydata_raw according to RiSCAN's instructions.
create_reflectance_pipeline(v_min, v_max, field='Reflectance')
create mapper and actor for displaying points with colors by reflectance
correct_reflectance_radial(mode)
Adjust reflectance for radial artifact.
reflectance_filter(threshold, radius=0, field='reflectance_radial')
Set Classification values for high reflectance objects (and neighborhood
if desired) to 66.
write_npy_pdal(output_dir, filename, mode)
Write SingleScan to numpy structured array that can be read by pdal.
write_pdal_transformation_json(mode, input_dir, output_dir)
Write a JSON string for PDAL such that it transforms raw scan data
by self.transform.
add_dist()
Add distance from scanner to polydata_raw
get_local_max(z_threshold, rmax, return_dist=False, return_zs=False)
Returns the set of points that are locally maximal in the current
transformation and, optionally, their distance from the scanner
and z sigma.
"""
def __init__(self, project_path, project_name, scan_name,
import_mode=None, poly='.1_.1_.01',
read_scan=False, import_las=False, create_id=True,
las_fieldnames=None,
class_list=[0, 1, 2, 70], read_dir=None, suffix='',
class_suffix=''):
"""
Creates SingleScan object and transformation pipeline.
Note, if a polydata folder with the desired suffix does not exist then
we will produce many vtk warnings (so I don't recommend this)
Parameters
----------
project_path : str
Path to folder containing all Riscan projects.
project_name : str
Name of Riscan project.
scan_name : str
Typically ScanPos0XX where XX is the scan number.
import_mode : str, optional
How to create polydata_raw, the base data for this SingleScan.
Options are: 'poly' (read from Riscan generated poly), 'read_scan'
(read saved vtp file), 'import_las' (use pdal to import from las
file generate by Riscan), 'empty' (create an empty polydata,
useful if we just want to work with transformations), or 'import_npy'
(import from npyfiles directories). If value is None, then code
will interpret values of read_scan and import_las
(deprecated method of specifying which to import) to maintain
backwards compatibility. The default is None.
poly : str, optional
The suffix describing which polydata to load. The default is
'.1_.1_.01'.
read_scan : bool, optional
Whether to read a saved scan from file. Typically useful for
handling filtered scans. The default is False. Deprecated,
use import_mode.
import_las: bool, optional
If true (and read_scan is False) read in the las file instead of
the polydata. The default is False.
create_id: bool, optional
If true and PointId's do not exist create PointIds. The default
is True.
las_fieldnames: list, optional
List of fieldnames to load if we are importing from a las file
Must include 'Points'. If None, and we are loading scans, read
all arrays. If None and we are importing las then set to
['Points', 'NumberOfReturns', 'ReturnIndex', 'Reflectance',
'Amplitude']. The default is None.
class_list : list, optional
List of categories this filter will return. If the special value
'all' is given, then we do not have a selection filter and we pass
through all points. The default is [0, 1, 2, 70].
read_dir : str, optional
Directory to read scan from. Defaults to npyfiles if None. The
default is None.
suffix : str, optional
Suffix for npyfiles directory if we are reading scans. The default
is '' which corresponds to the regular npyfiles directory.
class_suffix : str, optional
Suffix for which Classification[class_suffix].npy file to load as
'Classification' array. The default is '' (load Classification.npy)
Returns
-------
None.
"""
# Store instance attributes
self.project_path = project_path
self.project_name = project_name
self.scan_name = scan_name
self.poly = poly
self.class_suffix = class_suffix
# Get git_hash
git_hash = get_git_hash()
if import_mode is None:
# Issue a deprecated warning
warnings.warn("Use import_mode to indicate how SingleScan object" +
" should load polydata.", FutureWarning)
if read_scan:
import_mode = 'read_scan'
elif not import_las:
import_mode = 'poly'
elif import_las:
import_mode = 'import_las'
else:
raise RuntimeError("You have specified an invalid combination"
+ " of import flags")
# Read scan
if import_mode=='read_scan':
# Import directly from numpy files that we've already saved
if read_dir is None:
npy_path = os.path.join(self.project_path, self.project_name,
'npyfiles' + suffix, self.scan_name)
else:
npy_path = read_dir
if not os.path.isdir(npy_path):
raise ValueError('npyfiles directory does not exist')
# If las_fieldnames is None load all numpy files
if las_fieldnames is None: