forked from branchnetconsulting/wazuh-tools
-
Notifications
You must be signed in to change notification settings - Fork 0
/
sync-ossec-conf
48 lines (46 loc) · 2.75 KB
/
sync-ossec-conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
#!/bin/bash
#
# sync-ossec-conf
#
# Run this on the Wazuh master node manager after changes to ossec.conf are made there, to propagate them across the cluster.
# This script will generate and propagate a unique ossec.conf file for each worker node based on the one on the master node.
# - <node_name> value will be set to the name of the specific worker node
# - <node_type> value will be set to "worker"
#
# Requirements
# - The root user on the master node manager must be able to use ssh key-based authentication to login as root on each worker node.
# - The master node manager must be able to reach each worker node via ssh at the IP listed for the worker by "cluster_control -l".
# - The systemctl command on each of the worker nodes must be able to reference the Wazuh manager service as "wazuh-manager".
#
# Special kinds of ossec.conf sections you might not want to propagate from master to workers:
# - <agentless> sections, wodle sections like "aws-s3", "azure-logs", or <gcp-pubsub> sections
# Such sections redundantly present on all nodes of your Wazuh manager cluster may lead the same cloud-pulled and agentless events
# to be redundantly analyzed, generating redundant Wazuh alerts.
# If sections like this are needed in your environment and the total event volume generated by them is not large enough to
# need to be load balanced, you might reserve them to only be used on the Wazuh master node manager, and then to adapt this
# script to filter them out before propagating ossec.conf to the worker nodes.
#
# Also consider that this tool does not allow for worker nodes to have any config sections that are not present on the master.
#
# Wazuh manager on worker nodes is restarted in a cascading fashion, with RESTART_DELAY adding additional time between
# the restarts of consecutive worker nodes.
#
# Seconds of extra delay between restarts of consecutive worker nodes
# (rolling restart, so the whole cluster is never down at once).
RESTART_DELAY=10

# "cluster_control -l" output columns are: NAME TYPE VERSION ADDRESS
# so field 1 is the node name and field 4 is the node IP. Reading the
# fields directly avoids the per-line `echo | awk` subshells.
/var/ossec/bin/cluster_control -l | grep worker | sort |
while read -r NODENAME _ _ NODEIP _; do
  echo ""
  echo "Syncing Wazuh master node manager config to worker node $NODENAME..."
  # Use an unpredictable mktemp file rather than /tmp/$NODENAME.config:
  # a fixed name in world-writable /tmp is open to symlink/clobber attacks.
  TMPCONF=$(mktemp /tmp/ossec.conf.XXXXXX) || { echo "ERROR: mktemp failed" >&2; continue; }
  # Rewrite <node_name> and <node_type> for this specific worker. Using '|'
  # as the sed delimiter and double quotes keeps the substitution intact
  # even if the node name contains a '/' or other awkward characters.
  sed -e "s|<node_name>.*|<node_name>$NODENAME</node_name>|" \
      -e "s|<node_type>.*|<node_type>worker</node_type>|" \
      /var/ossec/etc/ossec.conf > "$TMPCONF"
  # If the copy fails, do NOT restart the worker — it would come back up
  # with its old (unsynced) configuration.
  if ! scp -q "$TMPCONF" "root@$NODEIP:/var/ossec/etc/ossec.conf"; then
    echo "ERROR: failed to copy config to $NODENAME ($NODEIP); skipping restart." >&2
    rm -f "$TMPCONF"
    continue
  fi
  rm -f "$TMPCONF"
  echo "Restarting worker node $NODENAME..."
  # ssh -n prevents ssh from consuming the loop's stdin (the piped
  # cluster_control output), which would silently drop remaining workers.
  ssh -n "root@$NODEIP" "chown ossec:ossec /var/ossec/etc/ossec.conf; systemctl restart wazuh-manager &> /dev/null"
  # Show a brief post-restart health summary (grep -E -i replaces the
  # deprecated `egrep -i`).
  ssh -n "root@$NODEIP" 'systemctl status wazuh-manager | grep -E -i "( Active: |WARNING|ERROR)" | sed "s/^ \+//" | sed "s/^/ /"'
  echo "Rolling restart delay..."
  sleep "$RESTART_DELAY"
done
echo ""
echo "Wazuh cluster ossec.conf sync and restart complete."
/var/ossec/bin/cluster_control -l | sed 's/^/ /'
echo ""