forked from kenluozhenyu/One_Click_Meteor_Shower
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdetection.py
1640 lines (1350 loc) · 68.9 KB
/
detection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import copy
import math
import os
import shutil
import threading
import multiprocessing
from time import sleep
from keras_preprocessing.image import ImageDataGenerator
import model
import settings
class HoughBundler:
    '''
    source:
    https://stackoverflow.com/questions/45531074/how-to-merge-lines-after-houghlinesp
    Clasterize and merge each cluster of cv2.HoughLinesP() output
    a = HoughBundler()
    foo = a.process_lines(houghP_lines, binary_image)
    '''

    def get_orientation(self, line):
        '''get orientation of a line, using its length
        https://en.wikipedia.org/wiki/Atan2
        '''
        # line is [x1, y1, x2, y2].  atan2(|dx|, |dy|) measures the angle
        # from the y-axis, so the result is always in [0, 90] degrees.
        orientation = math.atan2(abs((line[0] - line[2])), abs((line[1] - line[3])))
        return math.degrees(orientation)

    def checker(self, line_new, groups, min_distance_to_merge, min_angle_to_merge):
        '''Check if line have enough distance and angle to be count as similar

        Side effect: when a similar group is found, line_new is appended to
        that group and False is returned ("not a new group").  True means
        line_new matched no existing group.
        '''
        for group in groups:
            # walk through existing line groups
            for line_old in group:
                # check distance
                if self.get_distance(line_old, line_new) < min_distance_to_merge:
                    # check the angle between lines
                    orientation_new = self.get_orientation(line_new)
                    orientation_old = self.get_orientation(line_old)
                    # if all is ok -- line is similar to others in group
                    if abs(orientation_new - orientation_old) < min_angle_to_merge:
                        group.append(line_new)
                        return False
        # if it is totally different line
        return True

    def DistancePointLine(self, point, line):
        """Get distance between point and line
        http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba
        """
        px, py = point
        x1, y1, x2, y2 = line

        def lineMagnitude(x1, y1, x2, y2):
            'Get line (aka vector) length'
            lineMagnitude = math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2))
            return lineMagnitude

        LineMag = lineMagnitude(x1, y1, x2, y2)
        if LineMag < 0.00000001:
            # Degenerate (zero-length) line: report a huge distance so it
            # never counts as "close" to anything
            DistancePointLine = 9999
            return DistancePointLine

        u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1)))
        u = u1 / (LineMag * LineMag)

        if (u < 0.00001) or (u > 1):
            # closest point does not fall within the line segment, take the
            # shorter distance to an endpoint
            ix = lineMagnitude(px, py, x1, y1)
            iy = lineMagnitude(px, py, x2, y2)
            if ix > iy:
                DistancePointLine = iy
            else:
                DistancePointLine = ix
        else:
            # Intersecting point is on the line, use the formula
            ix = x1 + u * (x2 - x1)
            iy = y1 + u * (y2 - y1)
            DistancePointLine = lineMagnitude(px, py, ix, iy)

        return DistancePointLine

    def get_distance(self, a_line, b_line):
        """Get all possible distances between each dot of two lines and second line
        return the shortest
        """
        dist1 = self.DistancePointLine(a_line[:2], b_line)
        dist2 = self.DistancePointLine(a_line[2:], b_line)
        dist3 = self.DistancePointLine(b_line[:2], a_line)
        dist4 = self.DistancePointLine(b_line[2:], a_line)
        return min(dist1, dist2, dist3, dist4)

    def merge_lines_pipeline_2(self, lines):
        'Clusterize (group) lines'
        groups = []  # all lines groups are here
        # Parameters to play with
        min_distance_to_merge = 30
        min_angle_to_merge = 30
        # first line will create new group every time
        groups.append([lines[0]])
        # if line is different from existing gropus, create a new group
        for line_new in lines[1:]:
            # NOTE: checker() appends line_new into a matching group as a
            # side effect when it returns False
            if self.checker(line_new, groups, min_distance_to_merge, min_angle_to_merge):
                groups.append([line_new])
        return groups

    def merge_lines_segments1(self, lines):
        """Sort lines cluster and return first and last coordinates
        """
        orientation = self.get_orientation(lines[0])
        # special case
        if (len(lines) == 1):
            return [lines[0][:2], lines[0][2:]]
        # [[1,2,3,4],[]] to [[1,2],[3,4],[],[]]
        points = []
        for line in lines:
            points.append(line[:2])
            points.append(line[2:])
        # if vertical
        # (per get_orientation's convention; 45..135 deg from the y-axis)
        if 45 < orientation < 135:
            # sort by y
            points = sorted(points, key=lambda point: point[1])
        else:
            # sort by x
            points = sorted(points, key=lambda point: point[0])
        # return first and last point in sorted group
        # [[x,y],[x,y]]
        return [points[0], points[-1]]

    # def process_lines(self, lines, img):
    def process_lines(self, lines):
        '''Main function for lines from cv.HoughLinesP() output merging
        for OpenCV 3
        lines -- cv.HoughLinesP() output (shape (N, 1, 4) arrays)
        Returns a list of merged lines, each as [[x1, y1], [x2, y2]].
        '''
        lines_x = []
        lines_y = []
        # for every line of cv2.HoughLinesP()
        for line_i in [l[0] for l in lines]:
            orientation = self.get_orientation(line_i)
            # if vertical
            if 45 < orientation < 135:
                lines_y.append(line_i)
            else:
                lines_x.append(line_i)

        lines_y = sorted(lines_y, key=lambda line: line[1])
        lines_x = sorted(lines_x, key=lambda line: line[0])

        merged_lines_all = []
        # for each cluster in vertical and horizantal lines leave only one line
        for i in [lines_x, lines_y]:
            if len(i) > 0:
                groups = self.merge_lines_pipeline_2(i)
                merged_lines = []
                for group in groups:
                    merged_lines.append(self.merge_lines_segments1(group))
                merged_lines_all.extend(merged_lines)
        return merged_lines_all
class MeteorDetector:
    def __init__(self, thread_name='Single thread'):
        """Initialize per-thread detector state.

        thread_name: label used only in progress/log messages.
        """
        # In order to detect satellites (or planes), we need to compare
        # the previous image and the current image.
        #
        # If one line in the previous image has the same/similar angel
        # with one line in the current image, we may consider them as
        # trails by satellite or plane.
        # -- Do we need to have a threshold how faraway these two lines
        #    can be considered as separated events?
        #    This is a question needs further monitoring
        #
        # So the detection logic would be:
        # 1) Detect lines in the first image (with subtraction from the 2nd
        #    image)
        # 2) Store the detection lines, but not generate the extracted images
        #    at this point
        # 3) Detect lines for the 2nd image
        # 4) Compare the detection lines in the previous image and those
        #    from the current image
        # 5) If suspicious satellite detected:
        #    a) For the one belongs to the previous image, put it to the
        #       "previous_satellite"
        #    b) Put the satellite in the current image to the "current_satellite"
        # 6) After checking done:
        #    a) For the previous image, generate a list with "previous_satellite"
        #       removed. And then generate the extracted images
        #    b) For the list in the current image:
        #       i.  Current detection list -> self.previous_detection_lines
        #       ii. "current_satellite" -> self."previous_satellite"

        # Detection lines from the previously processed image
        self.Previous_Image_Detection_Lines = []
        # Lines from the previous image judged to be satellite/plane trails
        self.Previous_Image_Satellites = []
        self.Previous_Image_Filename = ''
        self.Previous_Image = []
        # Detection lines from the image currently being processed
        self.Current_Image_Detection_Lines = []
        self.Current_Image_Satellites = []
        self.Thread_Name = thread_name
# When two lines have the similar angel, get the two closest points
# This is for next step to determine if these two lines are in the
# same line
def __get_most_close_two_points_from_two_lines_with_same_angel(self,
L1_x1, L1_y1, L1_x2, L1_y2,
L2_x1, L2_y1, L2_x2, L2_y2,
angel):
close_x1 = 0
close_y1 = 0
close_x2 = 0
close_y2 = 0
if angel >= 0:
if min(L1_x1, L1_x2) < min(L2_x1, L2_x2):
close_x1 = max(L1_x1, L1_x2)
close_y1 = max(L1_y1, L1_y2)
close_x2 = min(L2_x1, L2_x2)
close_y2 = min(L2_y1, L2_y2)
else:
close_x1 = min(L1_x1, L1_x2)
close_y1 = min(L1_y1, L1_y2)
close_x2 = max(L2_x1, L2_x2)
close_y2 = max(L2_y1, L2_y2)
else:
if min(L1_x1, L1_x2) < min(L2_x1, L2_x2):
close_x1 = max(L1_x1, L1_x2)
close_y1 = min(L1_y1, L1_y2)
close_x2 = min(L2_x1, L2_x2)
close_y2 = max(L2_y1, L2_y2)
else:
close_x1 = min(L1_x1, L1_x2)
close_y1 = max(L1_y1, L1_y2)
close_x2 = max(L2_x1, L2_x2)
close_y2 = min(L2_y1, L2_y2)
return close_x1, close_y1, close_x2, close_y2
# Even though we pass he angle as parameter here, we expect the
# angles of these two lines are already compared and are quite
# closed, can be considered as equal
def __calculate_two_parallel_lines_distance(self, L1_x_mid, L1_y_mid, L2_x_mid, L2_y_mid, angle):
angle_mid = math.atan2((L2_y_mid - L1_y_mid), (L2_x_mid - L1_x_mid))
# To ensure the angle range is (-pi/2, pi/2)
if angle_mid > np.pi / 2:
angle_mid = angle_mid - np.pi
if angle_mid < -np.pi / 2:
angle_mid = np.pi + angle_mid
angle_mid = abs(angle_mid)
angle_mid_to_line = abs(angle_mid - abs(angle))
dist_mid = math.sqrt((L2_x_mid - L1_x_mid) ** 2 + (L2_y_mid - L1_y_mid) ** 2)
vertical_dist = dist_mid * math.sin(angle_mid_to_line)
return vertical_dist
'''
# Don't use this.
# A short line in a big photo image could cause the bias value
# of the line function (y=ax + b), varies too much
def __calculate_two_parallel_lines_distance(self, L1_x1, L1_y1, L1_x2, L1_y2, L2_x1, L2_y1, L2_x2, L2_y2, angle):
if L1_x1 == L1_x2 or L2_x1 == L2_x2:
# Vertical lines
return abs(L2_x1 - L1_x1)
if L1_y1 == L1_y2 or L1_y1 == L2_y2:
# Horizontal lines
return abs(L2_y1 - L1_y1)
bias_1 = ((L1_x2 * L1_y1) - (L1_x1 * L1_y2)) / (L1_x2 - L1_x1)
bias_2 = ((L2_x2 * L2_y1) - (L2_x1 * L2_y2)) / (L2_x2 - L2_x1)
dist = abs((bias_1 - bias_2) * math.cos(angle))
return dist
'''
# A. Some times one line could be recognized as two
# We want to get them merged to be one
# B. Some times lines from two images could belong to the same satellite
# We want to distinguish them
#
# Criteria:
# 1) Angle is almost the same
# 2) Vertical distance is very close
# 3) No overlap, like these:
# --------
# -------
# Only accept lines like these:
# -------- -----
# 4) The distance between the two closest points
# is within threshold
def __decide_if_two_lines_should_belong_to_the_same_object(self,
L1_x1, L1_y1, L1_x2, L1_y2,
L2_x1, L2_y1, L2_x2, L2_y2,
for_satellite=False):
angle_L1 = math.atan2((L1_y2 - L1_y1), (L1_x2 - L1_x1))
# To ensure the angle range is (-pi/2, pi/2)
if angle_L1 > np.pi / 2:
angle_L1 = angle_L1 - np.pi
if angle_L1 < -np.pi / 2:
angle_L1 = np.pi + angle_L1
angle_L2 = math.atan2((L2_y2 - L2_y1), (L2_x2 - L2_x1))
if angle_L2 > np.pi / 2:
angle_L2 = angle_L2 - np.pi
if angle_L2 < -np.pi / 2:
angle_L2 = np.pi + angle_L2
# Sometimes the two lines' direction is similar, but the angles
# are reverted, like one is -80 deg and one is +80 deg.
# In this case the delta should be calculated as 20 deg, not 160
angle_delta = abs(angle_L1 - angle_L2)
if angle_delta > np.pi/2:
angle_delta = np.pi - angle_delta
# if abs(angle_L1 - angle_L2) > settings.LINE_ANGEL_DELTA_THRESHOLD:
if angle_delta > settings.LINE_ANGEL_DELTA_THRESHOLD:
# Angle delta is too much
return False
L1_x_mid = int((L1_x1 + L1_x2) / 2)
L1_y_mid = int((L1_y1 + L1_y2) / 2)
L2_x_mid = int((L2_x1 + L2_x2) / 2)
L2_y_mid = int((L2_y1 + L2_y2) / 2)
angle_avg = (angle_L1 + angle_L2) / 2
# vertical_dist = self.__calculate_two_parallel_lines_distance(L1_x1, L1_y1, L1_x2, L1_y2,
# L2_x1, L2_y1, L2_x2, L2_y2,
# angle_avg)
vertical_dist = self.__calculate_two_parallel_lines_distance(L1_x_mid, L1_y_mid, L2_x_mid, L2_y_mid, angle_avg)
if not for_satellite:
# Check lines in the same image
if vertical_dist > settings.LINE_VERTICAL_DISTANCE_FOR_MERGE_THRESHOLD:
# Can only be considered as two parallel lines
# Not to merge
return False
else:
# Check line in different images for satellite detection
if vertical_dist > settings.LINE_VERTICAL_DISTANCE_FOR_SATELLITE_THRESHOLD:
# Can only be considered as two parallel lines
# Not to merge
return False
# Checking for overlap
b_overlap = False
if min(L1_y1, L1_y2) < min(L2_y1, L2_y2):
if max(L1_y2, L1_y2) > min(L2_y1, L2_y2):
# return False
b_overlap = True
if min(L1_y1, L1_y2) > min(L2_y1, L2_y2):
if min(L1_y1, L1_y2) < max(L2_y1, L2_y2):
# return False
b_overlap = True
if min(L1_x1, L1_x2) < min(L2_x1, L2_x2):
if max(L1_x1, L1_x2) > min(L2_x1, L2_x2):
# return False
b_overlap = True
if min(L1_x1, L1_x2) > min(L2_x1, L2_x2):
if min(L1_x1, L1_x2) < max(L2_x1, L2_x2):
# return False
b_overlap = True
# If there's overlap, but are very close, merge them as well
# In this case we don't need to calculate the closest two points
# Just return true to merge them
if b_overlap and vertical_dist <= settings.LINE_VERTICAL_DISTANCE_FOR_MERGE_W_OVERLAP_THRESHOLD:
return True
# Finally, check the most close two points
close_x1, close_y1, close_x2, close_y2 = \
self.__get_most_close_two_points_from_two_lines_with_same_angel(L1_x1, L1_y1, L1_x2, L1_y2,
L2_x1, L2_y1, L2_x2, L2_y2,
angle_avg)
dist_close = math.sqrt((close_x2 - close_x1) ** 2 + (close_y2 - close_y1) ** 2)
if not for_satellite:
if dist_close > settings.LINE_DISTANCE_FOR_MERGE_THRESHOLD:
return False
else:
if dist_close > settings.LINE_DISTANCE_FOR_SATELLITE_THRESHOLD:
return False
# All checking passed
return True
    # The initial detection lines need some filtering/processing:
    # 1) There would be some false detection due to the original
    #    image border, with image rotated by star-alignment
    # 2) Some lines could be detected as two, or more. Try to
    #    merge them back to one
    def detection_lines_filtering(self, detection_lines, orig_image):
        """Remove border artifacts and merge fragmented detection lines.

        detection_lines: HoughBundler.process_lines() output; each entry
            is a pair of end points [[x1, y1], [x2, y2]].
        orig_image: original BGR image; pixel colors around each line's
            center are sampled to recognize the rotated original image edge.

        Returns a list of [x1, y1, x2, y2, x_mid, y_mid, angle] records,
        with angle in radians normalized into (-pi/2, pi/2).
        """
        filtered_false_detection = []
        height, width, channels = orig_image.shape

        # Step 1: Check if the lines are due to false detection
        #         from the border (original image border)
        for i, line in enumerate(detection_lines):
            x1 = line[0][0]
            y1 = line[0][1]
            x2 = line[1][0]
            y2 = line[1][1]

            # In some cases the image border could cause some
            # false detection. Remove these
            if abs(x1 - x2) < settings.LINE_X_OR_Y_DELTA_THRESHOLD:
                if min(x1, x2) <= settings.DETECTION_IMAGE_BORDER_THRESHOLD \
                        or max(x1, x2) >= width - settings.DETECTION_IMAGE_BORDER_THRESHOLD:
                    # ignore this line, should be border
                    continue

            if abs(y1 - y2) < settings.LINE_X_OR_Y_DELTA_THRESHOLD:
                if min(y1, y2) <= settings.DETECTION_IMAGE_BORDER_THRESHOLD \
                        or max(y1, y2) >= height - settings.DETECTION_IMAGE_BORDER_THRESHOLD:
                    # ignore this line, should be border
                    continue

            # For images got rotated (for star-alignment purpose), the original
            # edge of the image could cause some false detection.
            #
            # This method is to get the color of some pixels around the center of
            # the detected line. If one pixel is all dark ([0,0,0]), then this
            # line should be the original edge
            x_mid = int((x1 + x2) / 2)
            y_mid = int((y1 + y2) / 2)

            # Sample one pixel on each side of the line center;
            # the image is indexed as image[y, x, channel]
            color_up = orig_image[max(y_mid - settings.LINE_CENTER_RADIUS_CHECKING, 0), x_mid]
            color_down = orig_image[min(y_mid + settings.LINE_CENTER_RADIUS_CHECKING, height - 1), x_mid]
            color_left = orig_image[y_mid, max(x_mid - settings.LINE_CENTER_RADIUS_CHECKING, 0)]
            color_right = orig_image[y_mid, min(x_mid + settings.LINE_CENTER_RADIUS_CHECKING, width - 1)]

            # Initially we want to check if the color is [0,0,0].
            # However it proved that if the image just rotated a little bit, then
            # we cannot go very far away of the border, then sometimes the value
            # is not fully ZERO. It could be like [2,0,0], [0,0,1], ...
            #
            # Let's set a threshold like [3,3,3]
            boo_up = color_up[0] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_up[1] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_up[2] < settings.DETECTION_BORDER_COLOR_THRESHOLD
            boo_down = color_down[0] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_down[1] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_down[2] < settings.DETECTION_BORDER_COLOR_THRESHOLD
            boo_left = color_left[0] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_left[1] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_left[2] < settings.DETECTION_BORDER_COLOR_THRESHOLD
            boo_right = color_right[0] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_right[1] < settings.DETECTION_BORDER_COLOR_THRESHOLD \
                and color_right[2] < settings.DETECTION_BORDER_COLOR_THRESHOLD

            if boo_up or boo_down or boo_left or boo_right:
                # Should be at the original image edge
                # (due to image rotation by star-alignment)
                # Ignore this one
                continue

            # Calculate the angel of the line (in rad), to help the
            # merging in next step
            angle = 0
            # The atan2() can handle the case when (x2-x1)==0.
            # The angel value can be positive or negative to
            # stand for different direction
            angle = math.atan2((y2 - y1), (x2 - x1))
            # To ensure the angle range is (-pi/2, pi/2)
            if angle > np.pi / 2:
                angle = angle - np.pi
            if angle < -np.pi / 2:
                angle = np.pi + angle

            filtered_false_detection.append([x1, y1, x2, y2, x_mid, y_mid, angle])

        # Step 2:
        # For some real lines, they could be recognized as two short ones
        # during detection.  Try to merge these back to one.
        # The method is:
        # 1) Calculate the angel of the line (done)
        # 2) If two lines have the similar angel, then get the two most
        #    close points
        #    a) If the distance of these two points are close enough, and
        #    b) If the angle of these two points is also similar to the
        #       angles of the original two lines
        #    then consider these two lines can be merged
        merged_detection = []
        for i in range(len(filtered_false_detection)):
            angle_1 = filtered_false_detection[i][6]
            if angle_1 == -3.14:
                # Such value was filled by below algorithm as a "removed"
                # sentinel.  Skip this line
                continue
            for j in range(i + 1, len(filtered_false_detection)):
                angle_2 = filtered_false_detection[j][6]
                if angle_2 == -3.14:
                    continue
                # Re-read entry i each iteration: it may have been updated
                # by a previous merge in this inner loop
                i_x1 = filtered_false_detection[i][0]
                i_y1 = filtered_false_detection[i][1]
                i_x2 = filtered_false_detection[i][2]
                i_y2 = filtered_false_detection[i][3]
                j_x1 = filtered_false_detection[j][0]
                j_y1 = filtered_false_detection[j][1]
                j_x2 = filtered_false_detection[j][2]
                j_y2 = filtered_false_detection[j][3]
                if self.__decide_if_two_lines_should_belong_to_the_same_object(i_x1, i_y1, i_x2, i_y2,
                                                                               j_x1, j_y1, j_x2, j_y2,
                                                                               for_satellite=False):
                    merged_x1 = 0
                    merged_y1 = 0
                    merged_x2 = 0
                    merged_y2 = 0
                    # Keep end-point order consistent with the line angle:
                    # for a negative angle, y decreases as x increases
                    if angle_1 >= 0:
                        merged_x1 = min(i_x1, i_x2, j_x1, j_x2)
                        merged_y1 = min(i_y1, i_y2, j_y1, j_y2)
                        merged_x2 = max(i_x1, i_x2, j_x1, j_x2)
                        merged_y2 = max(i_y1, i_y2, j_y1, j_y2)
                    else:
                        merged_x1 = min(i_x1, i_x2, j_x1, j_x2)
                        merged_y1 = max(i_y1, i_y2, j_y1, j_y2)
                        merged_x2 = max(i_x1, i_x2, j_x1, j_x2)
                        merged_y2 = min(i_y1, i_y2, j_y1, j_y2)
                    # The merged line is updated into filtered_false_detection[i]
                    # and filtered_false_detection[j] is marked as removed
                    new_angel = (angle_1 + angle_2) / 2
                    x_mid_merged = int((merged_x1 + merged_x2) / 2)
                    y_mid_merged = int((merged_y1 + merged_y2) / 2)
                    filtered_false_detection[i][0] = merged_x1
                    filtered_false_detection[i][1] = merged_y1
                    filtered_false_detection[i][2] = merged_x2
                    filtered_false_detection[i][3] = merged_y2
                    filtered_false_detection[i][4] = x_mid_merged
                    filtered_false_detection[i][5] = y_mid_merged
                    filtered_false_detection[i][6] = new_angel
                    filtered_false_detection[j][0] = 0
                    filtered_false_detection[j][1] = 0
                    filtered_false_detection[j][2] = 0
                    filtered_false_detection[j][3] = 0
                    filtered_false_detection[j][4] = 0
                    filtered_false_detection[j][5] = 0
                    # Use such an angle to indicate this is removed
                    filtered_false_detection[j][6] = -3.14
            # End of the j for loop
            # One entry in the i for loop has completely matched with others
            merged_detection.append([filtered_false_detection[i][0],
                                     filtered_false_detection[i][1],
                                     filtered_false_detection[i][2],
                                     filtered_false_detection[i][3],
                                     filtered_false_detection[i][4],
                                     filtered_false_detection[i][5],
                                     filtered_false_detection[i][6]
                                     ])
        return merged_detection
    # Get the two points coordinates for a square that can hold
    # the detected meteor image
    def get_box_coordinate_from_detected_line(self, x1, y1, x2, y2, img_width, img_height, factor=1):
        """Compute a square crop box (draw_x1, draw_y1, draw_x2, draw_y2)
        centered on the line (x1, y1)-(x2, y2).

        factor: enlarges the line's extent before sizing the box
            (normally settings.DETECTION_CROP_IMAGE_BOX_FACTOR).
        The box side is at least settings.DETECTION_CROP_IMAGE_BOX_SIZE,
        grows to hold longer lines, and is clamped/shifted to stay inside
        the image.
        """
        sample_width = abs(x2 - x1)
        sample_height = abs(y2 - y1)
        x_midpoint = int((x1 + x2) / 2)
        y_midpoint = int((y1 + y2) / 2)

        # increase the area with a factor. Normally 1.5
        sample_width = int(sample_width * factor)
        sample_height = int(sample_height * factor)

        # The size can be at least 640 x 640.
        # If the detected line size exceeds 640 pixels, the
        # draw box size can be enlarged accordingly
        draw_size = max(sample_width, sample_height, settings.DETECTION_CROP_IMAGE_BOX_SIZE)
        # And it should not exceed the image size.
        # This is for some exceptional cases
        draw_size = min(draw_size, img_width - 1, img_height - 1)

        draw_x1 = x_midpoint - int(draw_size / 2)
        draw_x2 = x_midpoint + int(draw_size / 2)
        # Just make it be exactly the same size as the draw_size
        # (the int(draw_size / 2) rounding can lose one pixel)
        if draw_x2 - draw_x1 < draw_size:
            draw_x2 = draw_x1 + draw_size

        draw_y1 = y_midpoint - int(draw_size / 2)
        draw_y2 = y_midpoint + int(draw_size / 2)
        if draw_y2 - draw_y1 < draw_size:
            draw_y2 = draw_y1 + draw_size

        # Detect if exceed the img size, or smaller than 0;
        # shift the box back inside the image if so
        if draw_x1 < 0:
            draw_x2 = draw_x2 - draw_x1
            draw_x1 = 0
        if draw_x2 > img_width - 1:
            draw_x1 = max(0, draw_x1 - (draw_x2 - img_width + 1))
            draw_x2 = img_width - 1
        if draw_y1 < 0:
            draw_y2 = draw_y2 - draw_y1
            draw_y1 = 0
        if draw_y2 > img_height - 1:
            draw_y1 = max(0, draw_y1 - (draw_y2 - img_height + 1))
            draw_y2 = img_height - 1

        return draw_x1, draw_y1, draw_x2, draw_y2
    def get_combined_box_list_from_detected_lines(self, detection_lines, img_width, img_height):
        """Build crop boxes for all detection lines, merging overlapping ones.

        Step 1: Get the box list for each line.
        Step 2: If two boxes have overlap, combine them (enlarged).

        detection_lines: records starting with [x1, y1, x2, y2, ...].
        Returns a list of
        [box_x1, box_y1, box_x2, box_y2, line_x_center, line_y_center].
        """
        box_list = []
        for line in detection_lines:
            x1 = line[0]
            y1 = line[1]
            x2 = line[2]
            y2 = line[3]
            # 2020-7-4
            # Also store the center coordinate of the detection line
            # in the file name.
            x_c = int((x1 + x2) / 2)
            y_c = int((y1 + y2) / 2)
            box_x1, box_y1, box_x2, box_y2 = \
                self.get_box_coordinate_from_detected_line(x1, y1, x2, y2, img_width, img_height,
                                                           factor=settings.DETECTION_CROP_IMAGE_BOX_FACTOR)
            # The "True" value in the end is for next step usage.
            # When two boxes are merged, tag one as False, indicating
            # it is to be removed
            box_list.append([box_x1, box_y1, box_x2, box_y2, x_c, y_c, True])

        combined_box_list = []
        for i in range(len(box_list)):
            tag = box_list[i][6]
            if not tag:
                # This has been merged, skip it
                continue
            # This algorithm still has bug, j should be started from 0
            # as well, just skipping the case of i==j.
            # But if we get too many boxes merged together there could
            # be another problem. The image area could be quite big,
            # and could include both landscape objects and meteor objects.
            for j in range(i + 1, len(box_list)):
                tag = box_list[j][6]
                if not tag:
                    # This has been merged, skip it
                    continue
                # Because the value in the box_list[i] could be
                # updated, need to get the values from each loop
                i_x1 = box_list[i][0]
                i_y1 = box_list[i][1]
                i_x2 = box_list[i][2]
                i_y2 = box_list[i][3]
                i_x_mid = int((i_x1 + i_x2) / 2)
                i_y_mid = int((i_y1 + i_y2) / 2)
                i_width = abs(i_x2 - i_x1)

                j_x1 = box_list[j][0]
                j_y1 = box_list[j][1]
                j_x2 = box_list[j][2]
                j_y2 = box_list[j][3]
                j_x_mid = int((j_x1 + j_x2) / 2)
                j_y_mid = int((j_y1 + j_y2) / 2)
                j_width = abs(j_x2 - j_x1)

                # Compare the center distance in each dimension.
                # If the distances in both dimensions are all
                # less than the two boxes' width/2 (sum up), then
                # it is an overlap
                center_dist_x = abs(j_x_mid - i_x_mid)
                center_dist_y = abs(j_y_mid - i_y_mid)
                if center_dist_x < (i_width + j_width) / 2 * settings.BOX_OVERLAP_THRESHOLD \
                        and center_dist_y < (i_width + j_width) / 2 * settings.BOX_OVERLAP_THRESHOLD:
                    # Overlap detected
                    merged_x1 = min([i_x1, i_x2, j_x1, j_x2])
                    merged_y1 = min([i_y1, i_y2, j_y1, j_y2])
                    merged_x2 = max([i_x1, i_x2, j_x1, j_x2])
                    merged_y2 = max([i_y1, i_y2, j_y1, j_y2])
                    # Make it as a square, not a rectangle.
                    # Parameter (factor=1) means no extra extension
                    merged_x1, merged_y1, merged_x2, merged_y2 = \
                        self.get_box_coordinate_from_detected_line(merged_x1,
                                                                   merged_y1,
                                                                   merged_x2,
                                                                   merged_y2,
                                                                   img_width,
                                                                   img_height,
                                                                   factor=1)
                    box_list[i][0] = merged_x1
                    box_list[i][1] = merged_y1
                    box_list[i][2] = merged_x2
                    box_list[i][3] = merged_y2
                    # 2020-7-4
                    # No update to the line center coordinate at this time.
                    # See what result would be to decide how to handle that
                    # later on.
                    box_list[j][0] = 0
                    box_list[j][1] = 0
                    box_list[j][2] = 0
                    box_list[j][3] = 0
                    box_list[j][6] = False
            # End of the j loop
            # One entry in the i for loop has completely merged with others
            combined_box_list.append([box_list[i][0], box_list[i][1], box_list[i][2], box_list[i][3],
                                      box_list[i][4], box_list[i][5]])
        return combined_box_list
    def detect_meteor_from_image(self, detection_img, original_img, equatorial_mount=False):
        """Detect candidate meteor lines in *detection_img*.

        detection_img: pre-processed image used for blur/Canny/Hough
            line detection.
        original_img: original image, passed to the filtering step for
            its border-color checks.
        equatorial_mount: selects the Gaussian blur kernel size.

        Returns the filtered line list (see detection_lines_filtering),
        or None when no line was detected.
        """
        if equatorial_mount:
            blur_kernel_size = settings.DETECTION_BLUR_KERNEL_SIZE_FOR_EQUATORIAL_MOUNTED_IMAGES
        else:
            # Images taken on fixed tripod. Even if star-align performed,
            # stars at the edges are still distorted, and can cause many
            # false detection.
            # So in this case the BLUR kernel size needs to be larger
            blur_kernel_size = settings.DETECTION_BLUR_KERNEL_SIZE_FOR_FIXED_TRIPOD_IMAGES

        # To ensure we have an odd value for the kernel size
        # (GaussianBlur requires odd kernel dimensions)
        count = blur_kernel_size % 2
        if count == 0:
            blur_kernel_size += 1

        blur_img = cv2.GaussianBlur(detection_img, (blur_kernel_size, blur_kernel_size), 0)

        canny_lowThreshold = settings.DETECTION_CANNY_LOW_THRESHOLD
        canny_ratio = settings.DETECTION_CANNY_RATIO
        canny_kernel_size = settings.DETECTION_CANNY_KERNEL_SIZE
        detected_edges = cv2.Canny(blur_img,
                                   canny_lowThreshold,
                                   canny_lowThreshold * canny_ratio,
                                   apertureSize=canny_kernel_size)

        line_threshold = settings.DETECTION_LINE_THRESHOLD
        minLineLength = settings.DETECTION_LINE_MIN_LINE_LENGTH
        maxLineGap = settings.DETECTION_LINE_MAX_LINE_GAP
        lines = cv2.HoughLinesP(image=detected_edges,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=line_threshold,
                                minLineLength=minLineLength,
                                maxLineGap=maxLineGap)
        if not (lines is None):
            my_HoughBundler = HoughBundler()
            # Merge those lines that are very close;
            # should consider them as just one.
            #
            # The format for each "line" element in the merged list:
            # [array([1565, 3099], dtype=int32), array([1663, 2986], dtype=int32)]
            # accessed as:
            #     x1 = line[0][0]
            #     y1 = line[0][1]
            #     x2 = line[1][0]
            #     y2 = line[1][1]
            merged_lines = my_HoughBundler.process_lines(lines=lines)
            # Remove some false detection,
            # and merge some lines which could belong to the same one.
            # The original image is needed (for some color detection)
            filtered_lines = self.detection_lines_filtering(merged_lines, original_img)
            return filtered_lines
        else:
            return None
    def draw_detection_boxes_on_image(self, original_img, detection_lines, color):
        """Return a copy of *original_img* with the detected lines, their
        angles (printed in degrees) and the combined crop boxes drawn on it.

        detection_lines: [x1, y1, x2, y2, x_mid, y_mid, angle] records.
        color: BGR tuple for the box rectangles (lines are drawn in red).
        """
        draw_img = copy.copy(original_img)
        height, width, channels = draw_img.shape
        box_list = self.get_combined_box_list_from_detected_lines(detection_lines, width, height)

        for line in detection_lines:
            x1 = line[0]
            y1 = line[1]
            x2 = line[2]
            y2 = line[3]
            angle = line[6]
            cv2.line(draw_img, (x1, y1), (x2, y2), (0, 0, 255), 2)
            # Label each line with its angle, converted from rad to degrees
            cv2.putText(draw_img, '{0:.3f}'.format(angle * 180 / np.pi),
                        (x2 + 10, y2 + 10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=3,
                        color=(255, 255, 255),
                        lineType=2)

        for box in box_list:
            box_x1 = box[0]
            box_y1 = box[1]
            box_x2 = box[2]
            box_y2 = box[3]
            cv2.rectangle(draw_img, (box_x1, box_y1), (box_x2, box_y2), color, 1)

        return draw_img
    # The "orig_filename" parameter should not have path info,
    # just the file name like "xxxx.jpg"
    #
    # The extracted image will be saved with a file name like this:
    #     ER4A3109_size_(05437,03625)_0001_pos_(02194,02421)_(02834,03061).JPG
    # The size(x,y) is the original size of the photo.
    # The pos (x1, y1) (x2, y2) position from the original image is kept
    # in the file name
    def extract_meteor_images_to_file(self, original_img, detection_lines, save_dir, orig_filename, verbose):
        """Crop each combined detection box out of *original_img* and save
        it under *save_dir* with position info encoded in the file name.

        Callers must validate detection_lines is not None before calling.
        verbose: when truthy, print each saved file path.
        """
        height, width, channels = original_img.shape
        box_list = self.get_combined_box_list_from_detected_lines(detection_lines, width, height)
        i = 0
        filename_no_ext, file_ext = os.path.splitext(orig_filename)

        for box in box_list:
            box_x1 = box[0]
            box_y1 = box[1]
            box_x2 = box[2]
            box_y2 = box[3]
            # 2020-7-4
            # Also store the line center coordinate in the file name
            line_x_center = box[4]
            line_y_center = box[5]
            crop_img = original_img[box_y1:box_y2, box_x1:box_x2]
            i += 1
            # Let the file name contain the position info,
            # so as to know where it is from in the original image
            file_to_save = filename_no_ext +\
                "_size_({:05d},{:05d})_{:04d}_pos_({:05d},{:05d})_({:05d},{:05d})_center_({:05d},{:05d})".\
                format(width, height, i, box_x1, box_y1, box_x2, box_y2, line_x_center, line_y_center) +\
                file_ext
            file_to_save = os.path.join(save_dir, file_to_save)
            if verbose:
                print("{}: saving {} ...".format(self.Thread_Name, file_to_save))
            # imencode + tofile instead of cv2.imwrite:
            # cv2.imwrite cannot handle non-ASCII (e.g. Chinese) paths
            cv2.imencode(file_ext, crop_img)[1].tofile(file_to_save)
        # End of function
# Logic:
# 1) For each element in the self.Previous_Image_Detection_List,
# compare with all items in the self.Current_Image_Detection_List
# 2) If the two have similar angles, and
# if the line between them, connecting the most closed two points
# have the similar angle, then consider these two are from the
# same satellite object
def check_satellite_with_previous_detection_list(self, verbose):
for previous_line in self.Previous_Image_Detection_Lines:
p_x1 = previous_line[0]
p_y1 = previous_line[1]
p_x2 = previous_line[2]
p_y2 = previous_line[3]
p_x_mid = previous_line[4]
p_y_mid = previous_line[5]
p_angle = previous_line[6]
for current_line in self.Current_Image_Detection_Lines: