This repository has been archived by the owner on Apr 11, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 34
/
config.ini.sample
92 lines (70 loc) · 3.11 KB
/
config.ini.sample
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# This is an example of how you can mix config values with command line values
# This file contains arguments you may want to reuse across multiple requests
# Simply do a -c <this config file> and add any other parameters you want in CLI
# and they will be merged
# NOTE: this file uses flat key=value pairs with # comments and no [sections],
# matching the configargparse-style reader — presumably; verify against the tool.

# ZoneMinder credentials and portal URL
# NOTE(review): credentials are stored in plain text — restrict this
# file's permissions and do not commit real values
username=admin
password=password
portal=https://server/zm

# If True, will first download videos and then locally process
download=True
# If True, will show progress bars
show_progress=False
# If True, only extract events that have object detection previously
# identified using zmeventserver machine learning hooks
objectonly=False
# If True, opens a useful graphical window to show what's being detected
# and how. Needs X
display=False
# Only extracts events that have at least one alarmed frame
alarmonly=True
# If alarmonly is True, will only get events with at least this many alarmed frames
minalarmframes=2
# Writes frames only if the detection logic said it found something
relevantonly=True
# Minimum area (in pixels) the moving object must occupy. Only relevant
# if you selected mixed or background_extraction mode
minblendarea=500

# If you use GPU, you have two options:
# use OpenCV's new GPU DNN support (requires OpenCV 4.2+),
# or you need to have darknet compiled in GPU mode
# and point darknet_lib to where that compiled .so is placed
gpu=False
# If True, will ignore darknet_lib
use_opencv_dnn_cuda=True
darknet_lib=/home/pp/fiddle/zmMagik/libdarknet_gpu.so

# If you want detections to lie inside a specified polygon
# Note that only one mask is supported for now
# Example: doorbell
#mask=4,406 1276,411 1279,719 0,719
# Example: driveway
#mask=69,130 604,68 646,307 1167,616 0,619

# I've found it useful to manually specify the fps of videos being processed.
# If you don't, I'll try and guess, but it's a bit random in my experience.
# Try without it. If you get accurate results, you can keep this commented
fps=5
# How many frames will be skipped during processing. Use to make things faster.
# 1 means no frames skipped
skipframes=1
# Used only for background_extraction mode. Keep this at 0.7
threshold=0.7
# Used by Yolo, if you use yolo or mixed mode. This is the minimum confidence level
# of matched objects (keep it between 0.5 and 0.9)
confidence=0.5
# How much to resize images (scale factor). No need to feed in huge frames.
resize=0.5
# If enabled, tries to histogram-match colors of a new event
# to blended events. I tried this while blending day and night events. It adjusts
# to the higher intensity (day). Not sure it really works well.
balanceintensity=False
#balanceintensity=True
# Size of the text that is written on top of detected objects
fontscale=0.8

# mixed: first uses OpenCV background extraction to detect if an object is
# moving; if it is, then runs yolo on it to make sure it's something valid
detection_type=mixed
# Regex of object labels to keep when yolo is involved
detectpattern=(person|car|motorbike|bus|truck|boat)
#detection_type=yolo_extraction
#detection_type=background_extraction

# You need these if you use yolo or mixed for detection
config_file=/var/lib/zmeventnotification/models/yolov3/yolov3.cfg
weights_file=/var/lib/zmeventnotification/models/yolov3/yolov3.weights
labels_file=/var/lib/zmeventnotification/models/yolov3/yolov3_classes.txt