-
-
Notifications
You must be signed in to change notification settings - Fork 53
/
config-sample.ini
459 lines (331 loc) · 12.9 KB
/
config-sample.ini
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
[capture]
; Device used to capture
devname =
; Pcap filename to run
pcapfile =
; dnstap socket path. Example: unix:///tmp/dnstap.sock, tcp://127.0.0.1:8080
dnstapsocket =
; Port selected to filter packets
port = 53
; Capture Sampling by a:b. eg sampleRatio of 1:100 will process 1 percent of the incoming packets
sampleratio = 1:1
; Cleans up packet hash table used for deduplication
dedupcleanupinterval = 1m0s
; Set the dnstap socket permission, only applicable when unix:// is used
dnstappermission = 755
; Number of routines used to handle received packets
packethandlercount = 2
; Size of the tcp assembler
tcpassemblychannelsize = 10000
; Size of the tcp result channel
tcpresultchannelsize = 10000
; Number of routines used to handle tcp packets
tcphandlercount = 1
; Size of the channel to send packets to be defragged
defraggerchannelsize = 10000
; Size of the channel where the defragged packets are returned
defraggerchannelreturnsize = 10000
; Size of the packet handler channel
packetchannelsize = 1000
; Afpacket Buffersize in MB
afpacketbuffersizemb = 64
; BPF filter applied to the packet stream. If port is selected, the packets will not be defragged.
filter = ((ip and (ip[9] == 6 or ip[9] == 17)) or (ip6 and (ip6[6] == 17 or ip6[6] == 6 or ip6[6] == 44)))
; Use AFPacket for live captures. Supported on Linux 3.0+ only
useafpacket = false
; The PCAP capture does not contain ethernet frames
noetherframe = false
; Deduplicate incoming packets. Only supported with --devName and --pcapFile. Experimental
dedup = false
; Do not put the interface in promiscuous mode
nopromiscuous = false
[clickhouse_output]
; Address of the clickhouse database to save the results. multiple values can be provided.
clickhouseaddress = localhost:9000
; Username to connect to the clickhouse database
clickhouseusername =
; Password to connect to the clickhouse database
clickhousepassword =
; Database to connect to the clickhouse database
clickhousedatabase = default
; Interval between sending results to ClickHouse. If non-0, Batch size is ignored and batch delay is used
clickhousedelay = 0s
; Clickhouse connection LZ4 compression level, 0 means no compression
clickhousecompress = 0
; Debug Clickhouse connection
clickhousedebug = false
; Use TLS for Clickhouse connection
clickhousesecure = false
; Save full packet query and response in JSON format.
clickhousesavefullquery = false
; What should be written to clickhouse. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
clickhouseoutputtype = 0
; Minimum capacity of the cache array used to send data to clickhouse. Set close to the queries per second received to prevent allocations
clickhousebatchsize = 100000
; Number of Clickhouse output Workers
clickhouseworkers = 1
; Channel Size for each Clickhouse Worker
clickhouseworkerchannelsize = 100000
[elastic_output]
; What should be written to elastic. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
elasticoutputtype = 0
; elastic endpoint address, example: http://127.0.0.1:9200. Used if elasticOutputType is not none
elasticoutputendpoint =
; elastic index
elasticoutputindex = default
; Send data to Elastic in batch sizes
elasticbatchsize = 1000
; Interval between sending results to Elastic if Batch size is not filled
elasticbatchdelay = 1s
[file_output]
; What should be written to file. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
fileoutputtype = 0
; Path to output folder. Used if fileoutputType is not none
fileoutputpath =
; Interval to rotate the file in cron format
fileoutputrotatecron = 0 0 * * *
; Number of files to keep. 0 to disable rotation
fileoutputrotatecount = 4
; Output format for file. options:json, csv, csv_no_header, gotemplate. note that the csv splits the datetime format into multiple fields
fileoutputformat = json
; Go Template to format the output as needed
fileoutputgotemplate = {{.}}
[influx_output]
; What should be written to influx. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
influxoutputtype = 0
; influx Server address, example: http://localhost:8086. Used if influxOutputType is not none
influxoutputserver =
; Influx Server Auth Token
influxoutputtoken = dnsmonster
; Influx Server Bucket
influxoutputbucket = dnsmonster
; Influx Server Org
influxoutputorg = dnsmonster
; Number of Influx output workers
influxoutputworkers = 8
; Minimum capacity of the cache array used to send data to Influx
influxbatchsize = 1000
[kafka_output]
; What should be written to kafka. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
kafkaoutputtype = 0
; kafka broker address(es), example: 127.0.0.1:9092. Used if kafkaOutputType is not none
kafkaoutputbroker =
; Kafka topic for logging
kafkaoutputtopic = dnsmonster
; Minimum capacity of the cache array used to send data to Kafka
kafkabatchsize = 1000
; Output format. options:json, gob.
kafkaoutputformat = json
; Kafka connection timeout in seconds
kafkatimeout = 3
; Interval between sending results to Kafka if Batch size is not filled
kafkabatchdelay = 1s
; Compress Kafka connection
kafkacompress = false
; Compression Type Kafka connection [snappy gzip lz4 zstd]; default(snappy).
kafkacompressiontype = snappy
; Use TLS for kafka connection
kafkasecure = false
; Path of CA certificate that signs Kafka broker certificate
kafkacacertificatepath =
; Path of TLS certificate to present to broker
kafkatlscertificatepath =
; Path of TLS certificate key
kafkatlskeypath =
[parquet_output]
; What should be written to parquet file. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
parquetoutputtype = 0
; Path to output folder. Used if parquetoutputtype is not none
parquetoutputpath =
; Number of records to write to parquet file before flushing
parquetflushbatchsize = 10000
; Number of workers to write to parquet file
parquetworkercount = 4
; Size of the write buffer in bytes
parquetwritebuffersize = 256000
[psql_output]
; What should be written to PostgreSQL. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
psqloutputtype = 0
; Psql endpoint used. must be in uri format. example: postgres://username:password@hostname:port/database?sslmode=disable
psqlendpoint =
; Number of PSQL workers
psqlworkers = 1
; Psql Batch Size
psqlbatchsize = 1
; Interval between sending results to Psql if Batch size is not filled. Any value larger than zero takes precedence over Batch Size
psqlbatchdelay = 0s
; Timeout for any INSERT operation before we consider them failed
psqlbatchtimeout = 5s
; Save full packet query and response in JSON format.
psqlsavefullquery = false
[sentinel_output]
; What should be written to Microsoft Sentinel. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
sentineloutputtype = 0
; Sentinel Shared Key, either the primary or secondary, can be found in Agents Management page under Log Analytics workspace
sentineloutputsharedkey =
; Sentinel Customer Id. can be found in Agents Management page under Log Analytics workspace
sentineloutputcustomerid =
; Sentinel Output LogType
sentineloutputlogtype = dnsmonster
; Sentinel Output Proxy in URI format
sentineloutputproxy =
; Sentinel Batch Size
sentinelbatchsize = 100
; Interval between sending results to Sentinel if Batch size is not filled. Any value larger than zero takes precedence over Batch Size
sentinelbatchdelay = 0s
[splunk_output]
; What should be written to HEC. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
splunkoutputtype = 0
; splunk endpoint address, example: http://127.0.0.1:8088. Used if splunkOutputType is not none, can be specified multiple times for load balance and HA
splunkoutputendpoint =
; Splunk HEC Token
splunkoutputtoken = 00000000-0000-0000-0000-000000000000
; Splunk Output Index
splunkoutputindex = temp
; Splunk Output Proxy in URI format
splunkoutputproxy =
; Splunk Output Source
splunkoutputsource = dnsmonster
; Splunk Output Sourcetype
splunkoutputsourcetype = json
; Send data to HEC in batch sizes
splunkbatchsize = 1000
; Interval between sending results to HEC if Batch size is not filled
splunkbatchdelay = 1s
[stdout_output]
; What should be written to stdout. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
stdoutoutputtype = 0
; Output format for stdout. options:json, csv, csv_no_header, gotemplate. note that the csv splits the datetime format into multiple fields
stdoutoutputformat = json
; Go Template to format the output as needed
stdoutoutputgotemplate = {{.}}
; Number of workers
stdoutoutputworkercount = 8
[syslog_output]
; What should be written to Syslog server. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
syslogoutputtype = 0
; Syslog endpoint address, example: udp://127.0.0.1:514, tcp://127.0.0.1:514. Used if syslogOutputType is not none
syslogoutputendpoint = udp://127.0.0.1:514
[zinc_output]
; What should be written to zinc. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
zincoutputtype = 0
; index used to save data in Zinc
zincoutputindex = dnsmonster
; zinc endpoint address, example: http://127.0.0.1:9200/api/default/_bulk. Used if zincOutputType is not none
zincoutputendpoint =
; zinc username, example: admin@admin.com. Used if zincOutputType is not none
zincoutputusername =
; zinc password, example: password. Used if zincOutputType is not none
zincoutputpassword =
; Send data to Zinc in batch sizes
zincbatchsize = 1000
; Interval between sending results to Zinc if Batch size is not filled
zincbatchdelay = 1s
; Zinc request timeout
zinctimeout = 10s
[general]
; Garbage Collection interval for tcp assembly and ip defragmentation
gctime = 10s
; Duration to calculate interface stats
capturestatsdelay = 1s
; Mask IPv4s by bits. 32 means all the bits of IP is saved in DB
masksize4 = 32
; Mask IPv6s by bits. 128 means all the bits of IP is saved in DB
masksize6 = 128
; Name of the server used to index the metrics.
servername = default
; Set debug Log format
logformat = text
; Set debug Log level, 0:PANIC, 1:ERROR, 2:WARN, 3:INFO, 4:DEBUG
loglevel = 3
; Size of the result processor channel
resultchannelsize = 100000
; write cpu profile to file
cpuprofile =
; write memory profile to file
memprofile =
; GOMAXPROCS variable
gomaxprocs = -1
; Limit of packets logged to clickhouse every iteration. Default 0 (disabled)
packetlimit = 0
; Skip outputting domains matching items in the CSV file path. Can accept a URL (http:// or https://) or path
skipdomainsfile =
; Hot-Reload skipdomainsfile interval
skipdomainsrefreshinterval = 1m0s
; Allow Domains logic input file. Can accept a URL (http:// or https://) or path
allowdomainsfile =
; Hot-Reload allowdomainsfile file interval
allowdomainsrefreshinterval = 1m0s
; Skip TLS verification when making HTTPS connections
skiptlsverification = false
[metric]
; Metric Endpoint Service
metricendpointtype = stderr
; Statsd endpoint. Example: 127.0.0.1:8125
metricstatsdagent =
; Prometheus Registry endpoint. Example: http://0.0.0.0:2112/metric
metricprometheusendpoint =
; Format for stderr output.
metricstderrformat = json
; Interval between sending results to Metric Endpoint
metricflushinterval = 10s