[general]
# This points to an optional secrets file
# If specified, you can define tokens with secret values in that file
# and only refer to the tokens in your main config file
#secrets=./secrets.ini
secrets=/etc/zm/secrets.ini
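# A minimal sketch of what a secrets file can contain (token names below
# match the !TOKEN references used in this file; the values are
# placeholders you must replace):
#   [secrets]
#   ZM_PORTAL=https://yourserver/zm
#   ZM_USER=youruser
#   ZM_PASSWORD=yourpassword
#   ZM_API_PORTAL=https://yourserver/zm/api
#   MLAPI_SECRET_KEY=some-long-random-string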
# portal/user/password are needed if you plan on using ZM's
# auth mechanism to get images
portal=!ZM_PORTAL
user=!ZM_USER
password=!ZM_PASSWORD
#basic_auth_user=username
#basic_auth_password=password
# api portal is needed if you plan to use tokens to get images
# requires ZM 1.33 or above
api_portal=!ZM_API_PORTAL
# set this to no if you don't plan to use auth. Default is yes.
auth_enabled=yes
# port that mlapi will listen on. Default 5000
port=5000
# Maximum # of processes that will be forked
# to handle requests. Note that each process will
# have its own copy of the model, so memory can
# build up very quickly
# This number also dictates how many requests will be executed in parallel
# The rest will be queued
# WSGI server to use. Default: flask
wsgi_server=bjoern
# if yes, will use ZM logs. Default no
#use_zm_logs=no
use_zm_logs=yes
pyzm_overrides={'log_level_debug':5}
# If you are using bjoern, processes is always 1
# For now, keep this at 1 if you are on a GPU
processes=1
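# A hedged sketch of starting mlapi so the port/processes settings above
# take effect (paths assumed; adjust to your install):
#   python3 ./mlapi.py -c ./mlapiconfig.ini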
# the secret key that will be used to sign
# JWT tokens. Make sure you change the value
# in your secrets.ini
mlapi_secret_key=!MLAPI_SECRET_KEY
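# Once mlapi is running, clients exchange user credentials for a JWT
# signed with this key. A hedged example against the login endpoint
# (host/port assumed from the settings above):
#   curl -H "Content-Type: application/json" -XPOST \
#        -d '{"username":"youruser", "password":"yourpassword"}' \
#        http://localhost:5000/api/v1/login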
# base data path for various files the ES+OD needs
# we support in-config variable substitution as well
base_data_path=/var/lib/zmeventnotification
#base_data_path=.
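# Example of the substitution: with base_data_path set as above,
# {{base_data_path}}/images expands to /var/lib/zmeventnotification/images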
# folder where images will be uploaded
# default ./images
images_path={{base_data_path}}/images
# folder where the user DB will be stored
db_path=./db
# If yes, will allow connections to self-signed certificates
# Default yes
allow_self_signed=yes
# You can now limit the # of detection processes
# per target processor. If not specified, default is 1
# Other detection processes will wait to acquire the lock
cpu_max_processes=3
tpu_max_processes=1
gpu_max_processes=1
# NEW: Time in seconds to wait for a processor to be free, before
# erroring out. Default is 120 (2 mins)
cpu_max_lock_wait=120
tpu_max_lock_wait=120
gpu_max_lock_wait=120
model_sequence=object,face,alpr
# If yes, will import ZM zones defined for monitors. Default is no
#import_zm_zones=yes
# If enabled, only zones whose names match the alarm cause will be used
# for filtering. This is useful if you only want to report detections
# where motion was detected by ZM. Default no
#only_triggered_zm_zones=no
# if yes, the last detection will be stored per monitor,
# and new detections whose bounding boxes and labels match
# the stored one will be discarded. This may be helpful
# in getting rid of static objects that get detected
# due to some motion.
match_past_detections=no
# The max difference in area between the objects if match_past_detections is on
# can also be specified in px like 300px. Default is 5%. Basically, bounding boxes of the same
# object can differ ever so slightly between detections. Contributor @neillbell put in this PR
# to calculate the difference in areas and based on his tests, 5% worked well. YMMV. Change it if needed.
# Note: You can specify label/object specific max_diff_areas as well. If present, they override this value
# example:
# person_past_det_max_diff_area=5%
# car_past_det_max_diff_area=5000px
past_det_max_diff_area=5%
# this is the maximum size a detected object can have. You can specify it in px or % just like past_det_max_diff_area
# This is pretty useful to eliminate bogus detections. In my case, depending on shadows and other lighting conditions,
# I sometimes see a "car" or "person" detected that covers most of my driveway view. That is practically impossible
# and therefore I set mine to 70% because I know any valid detected object cannot be larger than that area
max_detection_size=90%
# config for object
[object]
# If you are using the legacy format (use_sequence=no) then these parameters will
# be used during ML inferencing
#object_detection_pattern=.*
object_detection_pattern=(person|car|motorbike|bus|truck|boat)
object_min_confidence=0.3
object_framework=coral_edgetpu
object_processor=tpu
object_weights={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names
# If you are using the new ml_sequence format (use_sequence=yes) then
# you can fiddle with these parameters and look at ml_sequence later
# Note that these can be named anything. You can add custom variables, ad infinitum
# This is a useful debugging trick. If you are changing models and want to know which
# model detected an object, make this yes. When yes, it will prefix the model name before the
# detected object. Example: Instead of 'person', it will say '(yolo) person'
show_models=no
# Google Coral
# The mobiledet model came out in Nov 2020 and is supposed to be faster and more accurate but YMMV
tpu_object_weights_mobiledet={{base_data_path}}/models/coral_edgetpu/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
tpu_object_weights_mobilenet={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
tpu_object_weights_yolov5={{base_data_path}}/models/coral_edgetpu/yolov5s-int8_edgetpu.tflite
tpu_object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names
tpu_object_framework=coral_edgetpu
tpu_object_processor=tpu
tpu_min_confidence=0.6
# Yolo v4 on GPU (falls back to CPU if no GPU)
yolo4_object_weights={{base_data_path}}/models/yolov4/yolov4.weights
yolo4_object_labels={{base_data_path}}/models/yolov4/coco.names
yolo4_object_config={{base_data_path}}/models/yolov4/yolov4.cfg
yolo4_object_framework=opencv
yolo4_object_processor=gpu
# Yolo v3 on GPU (falls back to CPU if no GPU)
yolo3_object_weights={{base_data_path}}/models/yolov3/yolov3.weights
yolo3_object_labels={{base_data_path}}/models/yolov3/coco.names
yolo3_object_config={{base_data_path}}/models/yolov3/yolov3.cfg
yolo3_object_framework=opencv
yolo3_object_processor=gpu
# Tiny Yolo V4 on GPU (falls back to CPU if no GPU)
tinyyolo_object_config={{base_data_path}}/models/tinyyolov4/yolov4-tiny.cfg
tinyyolo_object_weights={{base_data_path}}/models/tinyyolov4/yolov4-tiny.weights
tinyyolo_object_labels={{base_data_path}}/models/tinyyolov4/coco.names
tinyyolo_object_framework=opencv
tinyyolo_object_processor=gpu
[face]
# NOTE: None of these are used if use_sequence is enabled. If enabled,
# only values in ml_sequence are processed
face_detection_framework=dlib
face_recognition_framework=dlib
face_num_jitters=0
face_upsample_times=1
face_model=cnn
face_train_model=cnn
face_recog_dist_threshold=0.6
face_recog_knn_algo=ball_tree
known_images_path={{base_data_path}}/known_faces
unknown_images_path={{base_data_path}}/unknown_faces
unknown_face_name=unknown face
save_unknown_faces=yes
save_unknown_faces_leeway_pixels=50
[alpr]
# NOTE: None of these are used if use_sequence is enabled. If enabled,
# only values in ml_sequence are processed
alpr_use_after_detection_only=yes
alpr_api_type=cloud
# -----| If you are using plate recognizer | ------
alpr_service=plate_recognizer
alpr_key=!PLATEREC_ALPR_KEY
platerec_stats=yes
#platerec_regions=['us','cn','kr']
platerec_min_dscore=0.1
platerec_min_score=0.2
# ----| If you are using openALPR |-----
#alpr_service=open_alpr
#alpr_key=!OPENALPR_ALPR_KEY
#openalpr_recognize_vehicle=1
#openalpr_country=us
#openalpr_state=ca
# openalpr returns percentages, but we convert them to a value between 0 and 1
#openalpr_min_confidence=0.3
# ----| If you are using openALPR command line |-----
openalpr_cmdline_binary=alpr
openalpr_cmdline_params=-j -d
openalpr_cmdline_min_confidence=0.3
## Monitor-specific settings
# You can override any parameter on a per-monitor basis
# The format is [monitor-N] where N is the monitor id
[monitor-9998]
# doorbell
model_sequence=face
object_detection_pattern=(person|monitor_doorbell)
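# valid_face_area restricts face detection to a polygon, given as
# space-separated x,y vertex pairs (pixel coordinates in the frame)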
valid_face_area=184,235 1475,307 1523,1940 146,1940
match_past_detections=yes
[monitor-9999]
# deck
object_detection_pattern=(person|monitor_deck)
stream_sequence = {
'frame_strategy': 'most_models',
'frame_set': 'alarm',
'contig_frames_before_error': 5,
'max_attempts': 3,
'sleep_between_attempts': 4,
'resize':800
}
[ml]
# if enabled, will not grab exclusive locks before running inferencing
# locking seems to cause issues on some unique file systems
disable_locks = no
my_frame_strategy = most_models
use_sequence = yes
stream_sequence = {
'frame_strategy': '{{my_frame_strategy}}',
'frame_set': 'snapshot,alarm',
'contig_frames_before_error': 5,
'max_attempts': 3,
'sleep_between_attempts': 4,
'resize':800,
# if yes, will convert 'snapshot' to a specific frame id
# This is useful because you may see boxes drawn at the wrong places when using mlapi
# This is because when mlapi detects an image, a 'snapshot' could point to, say, frame 45
# But when zm_detect gets the detections back and draws the boxes, snapshot could have moved
# to frame 50 (example). Enabling this makes sure mlapi tells zm_detect which frame id to use
# default is 'no'
'convert_snapshot_to_fid': 'yes',
} # very important - this brace needs to be indented inside stream_sequence
ml_sequence= {
'general': {
'model_sequence': '{{model_sequence}}',
'disable_locks': '{{disable_locks}}',
'match_past_detections': '{{match_past_detections}}',
'past_det_max_diff_area': '5%',
'car_past_det_max_diff_area': '10%',
#'ignore_past_detection_labels': ['dog', 'cat']
# when matching past detections, names in a group are treated the same
'aliases': [['car','bus','truck','boat'], ['broccoli', 'pottedplant']]
},
'object': {
'general':{
'pattern':'{{object_detection_pattern}}',
'same_model_sequence_strategy': 'most_unique', # other options: 'first', 'most', 'union'
},
'sequence': [{
#First run on TPU with higher confidence
#'maxsize':320,
'name': 'TPU object detection',
'enabled': 'no',
'object_weights':'{{tpu_object_weights_mobiledet}}',
'object_labels': '{{tpu_object_labels}}',
'object_min_confidence': {{tpu_min_confidence}},
'object_framework':'{{tpu_object_framework}}',
'tpu_max_processes': {{tpu_max_processes}},
'tpu_max_lock_wait': {{tpu_max_lock_wait}},
'max_detection_size':'{{max_detection_size}}',
'show_models':'{{show_models}}',
},
{
# YoloV4 on GPU (the TPU entry above is disabled; with a 'first' sequence strategy this would run only if the TPU step fails)
'name': 'CPU/GPU Yolov4 Object Detection',
'enabled': 'yes',
'object_config':'{{yolo4_object_config}}',
'object_weights':'{{yolo4_object_weights}}',
'object_labels': '{{yolo4_object_labels}}',
'object_min_confidence': {{object_min_confidence}},
'object_framework':'{{yolo4_object_framework}}',
'object_processor': '{{yolo4_object_processor}}',
'gpu_max_processes': {{gpu_max_processes}},
'gpu_max_lock_wait': {{gpu_max_lock_wait}},
'cpu_max_processes': {{cpu_max_processes}},
'cpu_max_lock_wait': {{cpu_max_lock_wait}},
'max_detection_size':'{{max_detection_size}}',
'match_past_detections': 'yes',
'past_det_max_diff_area': '5%',
'show_models':'{{show_models}}'
}]
},
'face': {
'general':{
'pattern': '{{face_detection_pattern}}',
#'pre_existing_labels': ['person'], # when put in general section, it will check if a previous detection type (like object) found this label
'same_model_sequence_strategy': 'union' # combine results below
},
'sequence': [
{
'name': 'Face Detection (TPU)',
'enabled': 'no', # make this yes if you want face detection with TPU first
'face_detection_framework': 'tpu',
'face_weights':'/var/lib/zmeventnotification/models/coral_edgetpu/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite',
'face_min_confidence': 0.3
},
{
'name':'Face Recognition (Dlib)', # optional
'enabled': 'yes', # optional
# 'pre_existing_labels': ['face'], # If you use TPU detection first, we can run this ONLY if TPU detects a face first
'save_unknown_faces':'{{save_unknown_faces}}',
'save_unknown_faces_leeway_pixels':{{save_unknown_faces_leeway_pixels}},
'face_detection_framework': '{{face_detection_framework}}',
'known_images_path': '{{known_images_path}}',
'unknown_images_path': '{{unknown_images_path}}',
'face_model': '{{face_model}}',
'face_train_model': '{{face_train_model}}',
'face_recog_dist_threshold': {{face_recog_dist_threshold}},
'face_num_jitters': {{face_num_jitters}},
'face_upsample_times':{{face_upsample_times}},
'gpu_max_processes': {{gpu_max_processes}},
'gpu_max_lock_wait': {{gpu_max_lock_wait}},
'cpu_max_processes': {{cpu_max_processes}},
'cpu_max_lock_wait': {{cpu_max_lock_wait}},
'max_size':800
}]
},
'alpr': {
'general':{
'same_model_sequence_strategy': 'first',
'pre_existing_labels':['car', 'motorbike', 'bus', 'truck', 'boat'],
'pattern': '{{alpr_detection_pattern}}'
},
'sequence': [{
'name': 'Platerecognizer Cloud Service',
'enabled': 'yes',
'alpr_api_type': '{{alpr_api_type}}',
'alpr_service': '{{alpr_service}}',
'alpr_key': '{{alpr_key}}',
'platerec_stats': '{{platerec_stats}}',
'platerec_min_dscore': {{platerec_min_dscore}},
'platerec_min_score': {{platerec_min_score}},
'max_size':1600,
#'platerec_payload': {
#'regions':['us'],
#'camera_id':12,
#},
#'platerec_config': {
# 'region':'strict',
# 'mode': 'fast'
#}
}]
}
} # very important - this brace needs to be indented inside ml_sequence