diff --git a/.gitmodules b/.gitmodules
index c79d671cb..02eb55ebc 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -3,19 +3,17 @@
url = https://github.com/ceph/ceph-ansible.git
ignore = dirty
branch = stable-6.0
-[submodule "src/debian/python3-setup-ovs"]
- path = src/debian/python3-setup-ovs
- url = https://github.com/seapath/python3-setup-ovs.git
- branch = main
-[submodule "src/debian/vm_manager"]
- path = src/debian/vm_manager
- url = https://github.com/seapath/vm_manager.git
- branch = main
-[submodule "src/cukinia-tests"]
- path = src/cukinia-tests
+[submodule "roles/deploy_cukinia_tests/cukinia-tests"]
+ path = roles/deploy_cukinia_tests/cukinia-tests
url = https://github.com/seapath/cukinia-tests.git
branch = main
-[submodule "src/cukinia"]
- path = src/cukinia
+[submodule "roles/deploy_cukinia/files/cukinia"]
+ path = roles/deploy_cukinia/files/cukinia
url = https://github.com/savoirfairelinux/cukinia.git
branch = master
+[submodule "roles/deploy_python3_setup_ovs/files/python3-setup-ovs"]
+ path = roles/deploy_python3_setup_ovs/files/python3-setup-ovs
+ url = https://github.com/seapath/python3-setup-ovs.git
+[submodule "roles/deploy_vm_manager/files/vm_manager"]
+ path = roles/deploy_vm_manager/files/vm_manager
+ url = https://github.com/seapath/vm_manager.git
diff --git a/playbooks/ci_test.yaml b/playbooks/ci_test.yaml
index 89733c250..e61046c63 100644
--- a/playbooks/ci_test.yaml
+++ b/playbooks/ci_test.yaml
@@ -5,6 +5,15 @@
# SEAPATH.
---
-- import_playbook: ./test_deploy_cukinia.yaml
+
+- name: deploy cukinia
+ hosts:
+ - cluster_machines
+ - standalone_machine
+ - VMs
+ become: true
+ roles:
+ - deploy_cukinia
+
- import_playbook: ./test_deploy_cukinia_tests.yaml
- import_playbook: ./test_run_cukinia.yaml
diff --git a/playbooks/seapath_setup_hardened_debian.yaml b/playbooks/seapath_setup_hardened_debian.yaml
index 4b8ab2b54..8c45b233c 100644
--- a/playbooks/seapath_setup_hardened_debian.yaml
+++ b/playbooks/seapath_setup_hardened_debian.yaml
@@ -11,7 +11,7 @@
vars:
revert: false
roles:
- - debian/hardening
+ - debian_hardening
- name: Add hardened for SEAPATH physical machines
become: true
hosts:
@@ -20,7 +20,7 @@
vars:
revert: false
roles:
- - debian/hardening/physical_machine
+ - debian_hardening_physical_machine
- name: Reboot to apply hardening
become: true
diff --git a/playbooks/seapath_setup_main.yaml b/playbooks/seapath_setup_main.yaml
index 9330606b3..384f71d74 100644
--- a/playbooks/seapath_setup_main.yaml
+++ b/playbooks/seapath_setup_main.yaml
@@ -28,6 +28,22 @@
- import_playbook: seapath_setup_network.yaml
+- name: Configure timemaster
+ hosts:
+ - cluster_machines
+ - standalone_machine
+ become: true
+ roles:
+ - timemaster
+
+- name: Configure snmp
+ hosts:
+ - cluster_machines
+ - standalone_machine
+ become: true
+ roles:
+ - snmp
+
- import_playbook: cluster_setup_ceph.yaml
- import_playbook: cluster_setup_libvirt.yaml
- import_playbook: cluster_setup_add_livemigration_user.yaml
diff --git a/playbooks/seapath_setup_network.yaml b/playbooks/seapath_setup_network.yaml
index f601698ef..05de83c76 100644
--- a/playbooks/seapath_setup_network.yaml
+++ b/playbooks/seapath_setup_network.yaml
@@ -2,16 +2,14 @@
# Copyright (C) 2024 Savoir-faire Linux, Inc.
# SPDX-License-Identifier: Apache-2.0
-# This Ansible playbook configures the networks and defines the hostnames. It
-# can be used on cluster machines and VMs.
+# This Ansible playbook configures the networks and defines the hostnames.
---
-- name: Get distrebution variables
+- name: Get distribution variables
gather_facts: true
hosts:
- cluster_machines
- standalone_machine
- - VMs
tasks:
- include_vars: "../vars/{{ ansible_distribution }}_paths.yml"
@@ -19,7 +17,6 @@
hosts:
- cluster_machines
- standalone_machine
- - VMs
become: true
tasks:
- block:
@@ -63,24 +60,11 @@
- role: systemd_networkd
when: netplan_configurations is not defined
-- name: Apply network with systemd-networkd roles on VMs
- become: true
- hosts:
- - VMs
- vars_files:
- - ../vars/network_vars.yml
- vars:
- systemd_networkd_apply_config: "true"
- roles:
- - role: systemd_networkd
- when: netplan_configurations is not defined
-
- name: Apply network config with netplan
become: true
hosts:
- cluster_machines
- standalone_machine
- - VMs
tasks:
- block:
- name: Create /etc/netplan directory
@@ -248,7 +232,6 @@
- name: Configure hosts and hostname
hosts:
- standalone_machine
- - VMs
become: true
tasks:
- name: Set hostname
@@ -270,7 +253,6 @@
hosts:
- cluster_machines
- standalone_machine
- - VMs
become: true
tasks:
- block:
@@ -319,7 +301,6 @@
hosts:
- cluster_machines
- standalone_machine
- - VMs
become: true
tasks:
- block:
@@ -363,7 +344,6 @@
hosts:
- cluster_machines
- standalone_machine
- - VMs
become: true
tasks:
- name: Stop and disable systemd-resolved
@@ -373,72 +353,6 @@
enabled: false
when: dns_servers is not defined
-- name: Configure TimeMaster
- hosts:
- - cluster_machines
- - standalone_machine
- become: true
- vars:
- apply_config: "{{ apply_network_config | default(false) }}"
- tasks:
- - name: Populate service facts
- service_facts:
- - name: stop and disable systemd-timesyncd if it exists
- service:
- name: "systemd-timesyncd"
- state: stopped
- enabled: false
- when: "'systemd-timesyncd.service' in services"
- - name: Create timemaster configuration
- template:
- src: ../templates/timemaster.conf.j2
- dest: "{{ path_timemaster_conf }}"
- register: timemasterconf1
- - name: comment pool configuration in chrony.conf
- replace:
- path: "{{ path_chrony_conf }}"
- regexp: '^(pool .*)'
- replace: '#\1'
- register: timemasterconf2
- - name: Create timemaster.service.d directory
- file:
- path: /etc/systemd/system/timemaster.service.d/
- state: directory
- owner: root
- group: root
- mode: 0755
- - name: Copy timemaster.service overide
- template:
- src: ../templates/timemaster.service.j2
- dest: /etc/systemd/system/timemaster.service.d/override.conf
- register: timemasterconf3
- - name: Enable timemaster
- service:
- name: "timemaster"
- enabled: true
- - name: restart timemaster if necessary
- service:
- name: "timemaster"
- state: restarted
- enabled: true
- daemon_reload: true
- when:
- - timemasterconf1.changed or timemasterconf2.changed or timemasterconf3.changed
- - apply_config or need_reboot is not defined or not need_reboot
-
-- name: Stop chrony service
- hosts:
- - cluster_machines
- - standalone_machine
- - VMs
- become: true
- tasks:
- - name: stop and disable chrony
- service:
- name: "{{ service_name_chrony }}"
- state: stopped
- enabled: false
-
- name: Configure systemd-networkd-wait-online.service
hosts:
- cluster_machines
@@ -583,14 +497,6 @@
state: stopped
when: conntrackd_ignore_ip_list is not defined
-- name: Configure snmp
- hosts:
- - cluster_machines
- - standalone_machine
- become: true
- roles:
- - snmp
-
- name: Restart machine if needed
hosts:
- cluster_machines
diff --git a/playbooks/seapath_setup_prerequiscentos.yaml b/playbooks/seapath_setup_prerequiscentos.yaml
index 4f8cca252..826f728d0 100644
--- a/playbooks/seapath_setup_prerequiscentos.yaml
+++ b/playbooks/seapath_setup_prerequiscentos.yaml
@@ -16,14 +16,14 @@
- standalone_machine
become: true
roles:
- - centos/physical_machine
+ - centos_physical_machine
- name: Prerequis hypervisor centos
hosts:
- hypervisors
- standalone_machine
become: true
roles:
- - centos/hypervisor
+ - centos_hypervisor
- name: Add admin user to haclient group
hosts:
diff --git a/playbooks/seapath_setup_prerequisdebian.yaml b/playbooks/seapath_setup_prerequisdebian.yaml
index c79e90e8f..418c6b9ea 100644
--- a/playbooks/seapath_setup_prerequisdebian.yaml
+++ b/playbooks/seapath_setup_prerequisdebian.yaml
@@ -16,14 +16,14 @@
- standalone_machine
become: true
roles:
- - debian/physical_machine
+ - debian_physical_machine
- name: Prerequis hypervisor debian
hosts:
- hypervisors
- standalone_machine
become: true
roles:
- - debian/hypervisor
+ - debian_hypervisor
- name: Add admin user to haclient group
hosts:
diff --git a/playbooks/seapath_setup_unhardened_debian.yaml b/playbooks/seapath_setup_unhardened_debian.yaml
index b44a86501..7ac568370 100644
--- a/playbooks/seapath_setup_unhardened_debian.yaml
+++ b/playbooks/seapath_setup_unhardened_debian.yaml
@@ -11,7 +11,7 @@
vars:
revert: true
roles:
- - debian/hardening
+ - debian_hardening
- name: Remove hardened for SEAPATH physical machines
become: true
hosts:
@@ -20,7 +20,7 @@
vars:
revert: true
roles:
- - debian/hardening/physical_machine
+ - debian_hardening_physical_machine
- name: Reboot to apply hardening revert
become: true
diff --git a/playbooks/test_deploy_cukinia.yaml b/playbooks/test_deploy_cukinia.yaml
deleted file mode 100644
index d9e37c05a..000000000
--- a/playbooks/test_deploy_cukinia.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2022, RTE (http://www.rte-france.com)
-# SPDX-License-Identifier: Apache-2.0
-
----
-- name: Install Cukinia
- hosts:
- - cluster_machines
- - standalone_machine
- - VMs
- become: true
- tasks:
- - name: Copy Cukinia script
- copy:
- src: ../src/cukinia/cukinia
- dest: /usr/local/bin/cukinia
- mode: '0755'
- - name: Use Bash as Cukinia's shell
- replace:
- path: /usr/local/bin/cukinia
- regexp: '^#!/bin/sh$'
- replace: "#!/usr/bin/env bash"
- - name: "Patch cukinia: CAT"
- lineinfile:
- path: /usr/local/bin/cukinia
- line: "local CAT=zcat"
- insertafter: 'local line=""'
diff --git a/playbooks/test_deploy_cukinia_tests.yaml b/playbooks/test_deploy_cukinia_tests.yaml
index 83ea1c66b..eaed63474 100644
--- a/playbooks/test_deploy_cukinia_tests.yaml
+++ b/playbooks/test_deploy_cukinia_tests.yaml
@@ -8,91 +8,6 @@
- standalone_machine
- VMs
become: true
- gather_facts: true
- tasks:
- - name: Copy Cukinia's tests
- synchronize:
- src: ../src/cukinia-tests/cukinia
- dest: /etc/
- delete: true
- rsync_opts:
- - "--exclude=*.j2"
- - name: Copy Cukinia's tests templates
- template:
- src: ../src/cukinia-tests/cukinia/{{ item.src }}
- dest: /etc/cukinia/{{ item.dest }}
- with_items:
- - { src: 'common_security_tests.d/apt.conf.j2',
- dest: 'common_security_tests.d/apt.conf' }
- - { src: 'hypervisor_security_tests.d/shadow.conf.j2',
- dest: 'hypervisor_security_tests.d/shadow.conf' }
- - { src: 'hypervisor_security_tests.d/passwd.conf.j2',
- dest: 'hypervisor_security_tests.d/passwd.conf' }
- - { src: 'hypervisor_security_tests.d/groups.conf.j2',
- dest: 'hypervisor_security_tests.d/groups.conf' }
- - { src: 'common_security_tests.d/sudo.conf.j2',
- dest: 'common_security_tests.d/sudo.conf' }
- - name: Create /usr/share/cukinia/includes
- file:
- path: /usr/share/cukinia/includes
- state: directory
- owner: root
- group: root
- mode: 0755
- - name: Copy Cukinia's includes
- copy:
- src: ../src/cukinia-tests/includes/
- dest: /usr/share/cukinia/includes/
- - name: Create /usr/share/testdata
- file:
- path: /usr/share/testdata
- state: directory
- owner: root
- group: root
- mode: 0755
-
-- name: Deploy VM test files
- hosts:
- - cluster_machines
- - standalone_machine
- become: true
- tasks:
- - name: Copy vm.xml
- copy:
- src: ../src/debian/vm_manager/vm_manager/testdata/vm.xml
- dest: /usr/share/testdata
- - name: Copy wrong_vm_config.xml
- copy:
- src: ../src/debian/vm_manager/vm_manager/testdata/wrong_vm_config.xml
- dest: /usr/share/testdata
-
-
-- name: Create /etc/cukinia.conf for observers
- hosts: observers
- become: true
- tasks:
- - name: Create a symlink cukinia.conf to cukinia-observer.conf
- file:
- src: /etc/cukinia/cukinia-observer.conf
- dest: /etc/cukinia/cukinia.conf
- state: link
-
-- name: Create /etc/cukinia.conf for hypervisors
- hosts: hypervisors
- become: true
- tasks:
- - name: Create a symlink cukinia.conf to cukinia-hypervisor.conf
- file:
- src: /etc/cukinia/cukinia-hypervisor.conf
- dest: /etc/cukinia/cukinia.conf
- state: link
-
-- name: Create /etc/cukinia.conf for VMs
- hosts: VMs
- become: true
- tasks:
- - name: Create a symlink cukinia.conf to cukinia-observer.conf
- file:
- src: /etc/cukinia/cukinia-observer.conf
- dest: /etc/cukinia/cukinia.conf
- state: link
+ gather_facts: true
+ roles:
+ - deploy_cukinia_tests
diff --git a/roles/centos/files/journald.conf b/roles/centos/files/journald.conf
new file mode 100644
index 000000000..6e1f321f0
--- /dev/null
+++ b/roles/centos/files/journald.conf
@@ -0,0 +1,44 @@
+# This file is part of systemd.
+#
+# systemd is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# Entries in this file show the compile time defaults.
+# You can change settings by editing this file.
+# Defaults can be restored by simply deleting this file.
+#
+# See journald.conf(5) for details.
+
+[Journal]
+Storage=persistent
+#Compress=yes
+#Seal=yes
+#SplitMode=uid
+#SyncIntervalSec=5m
+#RateLimitIntervalSec=30s
+#RateLimitBurst=10000
+#SystemMaxUse=
+#SystemKeepFree=
+#SystemMaxFileSize=
+#SystemMaxFiles=100
+#RuntimeMaxUse=
+#RuntimeKeepFree=
+#RuntimeMaxFileSize=
+#RuntimeMaxFiles=100
+#MaxRetentionSec=
+#MaxFileSec=1month
+#ForwardToSyslog=yes
+#ForwardToKMsg=no
+#ForwardToConsole=no
+#ForwardToWall=yes
+#TTYPath=/dev/console
+#MaxLevelStore=debug
+#MaxLevelSyslog=debug
+#MaxLevelKMsg=notice
+#MaxLevelConsole=info
+#MaxLevelWall=emerg
+#LineMax=48K
+#ReadKMsg=yes
+#Audit=no
diff --git a/roles/centos/files/sysctl/00-panicreboot.conf b/roles/centos/files/sysctl/00-panicreboot.conf
new file mode 100644
index 000000000..56730a77e
--- /dev/null
+++ b/roles/centos/files/sysctl/00-panicreboot.conf
@@ -0,0 +1 @@
+kernel.panic = 20
diff --git a/roles/centos/physical_machine/tasks/main.yml b/roles/centos/physical_machine/tasks/main.yml
deleted file mode 100644
index 7db3e7d61..000000000
--- a/roles/centos/physical_machine/tasks/main.yml
+++ /dev/null
@@ -1,464 +0,0 @@
-# Copyright (C) 2024 Red Hat, Inc.
-# SPDX-License-Identifier: Apache-2.0
-
-# We need to restart nginx if it's running and know if gunicorn socket is present
-- name: Populate service facts
- service_facts:
-#- name: Print service facts
-# ansible.builtin.debug:
-# var: ansible_facts.services
-- name: install vm-mgr http interface
- vars:
- vmmgrapi_certs_dir: "/var/local/vmmgrapi/certs"
- block:
- - name: create vm-mgr api certs folder
- file:
- path: "{{ vmmgrapi_certs_dir }}"
- state: directory
- mode: 0755
-
- - name: upload cert/key if provided
- copy:
- src: "{{ item }}"
- dest: "{{ vmmgrapi_certs_dir }}/{{ item }}"
- mode: '0644'
- with_items:
- - "{{ vmmgr_http_tls_crt_path }}"
- - "{{ vmmgr_http_tls_key_path }}"
- when:
- - vmmgr_http_tls_crt_path is defined
- - vmmgr_http_tls_key_path is defined
-
- - name: create certificat / key if not provided
- command: openssl req -x509 -nodes -days 9125 -newkey rsa:4096 -subj "/C=FR/ST=seapath/L=seapath/O=seapath/OU=seapath/CN=seapath" -keyout "{{ vmmgrapi_certs_dir }}/seapath.key" -out "{{ vmmgrapi_certs_dir }}/seapath.crt"
- args:
- creates: "{{ item }}"
- with_items:
- - "{{ vmmgrapi_certs_dir }}/seapath.crt"
- - "{{ vmmgrapi_certs_dir }}/seapath.key"
-
- - name: Correct certificates rights
- file:
- path: "{{ vmmgrapi_certs_dir }}/{{ item }}"
- mode: 0644
- loop:
- - "seapath.crt"
-
- - name: Correct private keys rights
- file:
- path: "{{ vmmgrapi_certs_dir }}/{{ item }}"
- mode: 0640
- loop:
- - "seapath.key"
-
- - name: Check permission on authentication file
- ansible.builtin.file:
- path: "{{ vmmgr_http_local_auth_file }}"
- owner: www-data
- group: www-data
- mode: '0600'
- state: touch
- when: vmmgr_http_local_auth_file is defined
-
- - name: Copy nginx.conf
- template:
- src: ../src/debian/vmmgrapi/nginx.conf.j2
- dest: /etc/nginx/nginx.conf
- mode: '0600'
- register: nginx_conf
-
- - name: restart nginx if needed
- ansible.builtin.systemd:
- name: nginx.service
- enabled: no
- state: stopped
- when:
- - nginx_conf.changed
- - services['nginx.service']['state'] == "running"
-
- - name: Copy vmmgrapi files
- ansible.builtin.copy:
- src: ../src/debian/vmmgrapi/{{ item }}
- dest: /var/local/vmmgrapi/{{ item }}
- mode: '0644'
- with_items:
- - wsgi.py
-
- - name: Copy vmmgrapi systemd files
- ansible.builtin.copy:
- src: ../src/debian/vmmgrapi/{{ item }}
- dest: /etc/systemd/system/{{ item }}
- mode: '0644'
- with_items:
- - gunicorn.socket
- - gunicorn.service
- register: vmmgrapi_systemd
-
- - name: daemon-reload vmmgrapi
- ansible.builtin.service:
- daemon_reload: yes
- when: vmmgrapi_systemd.changed
-
- - name: restart gunicorn.socket if needed
- ansible.builtin.systemd:
- name: gunicorn.socket
- enabled: yes
- state: restarted
- when: vmmgrapi_systemd.changed
-
- - name: start and enable gunicorn.socket
- ansible.builtin.systemd:
- name: gunicorn.socket
- enabled: yes
- state: started
-
- when: enable_vmmgr_http_api is defined and enable_vmmgr_http_api is true
-
-- name: disable gunicorn.socket if http flask api is not enabled
- ansible.builtin.systemd:
- name: gunicorn.socket
- enabled: no
- state: stopped
- when:
- - enable_vmmgr_http_api is not defined or enable_vmmgr_http_api is false
- - services['gunicorn.socket'] is defined
-
-- name: disable nginx.service all the time, if it exists
- ansible.builtin.systemd:
- name: nginx.service
- enabled: no
- when:
- - services['nginx.service'] is defined
-
-- name: disable gunicorn.service all the time, if it exists
- ansible.builtin.systemd:
- name: gunicorn.service
- enabled: no
- when:
- - services['gunicorn.service'] is defined
-
-- name: Copy sysctl rules
- ansible.builtin.copy:
- src: ../src/debian/sysctl/{{ item }}
- dest: /etc/sysctl.d/{{ item }}
- mode: '0644'
- with_items:
- - 00-bridge_nf_call.conf
- register: sysctl1
-
-- name: Add sysctl conf from inventory (extra_sysctl_physical_machines)
- ansible.builtin.copy:
- dest: /etc/sysctl.d/00-seapathextra_physicalmachines.conf
- mode: '0644'
- content: "{{ extra_sysctl_physical_machines }}"
- when: extra_sysctl_physical_machines is defined
- register: sysctl2
-
-- name: restart systemd-sysctl if needed
- ansible.builtin.systemd:
- name: systemd-sysctl.service
- state: restarted
- when: sysctl1.changed or sysctl2.changed
-
-- name: create src folder on hosts
- file:
- path: /tmp/src
- state: directory
- mode: '0755'
-
-- name: temp fix for synchronize to force evaluate variables
- set_fact:
- ansible_host: "{{ ansible_host }}"
-
-- name: Synchronization of src python3-setup-ovs on the control machine to dest on the remote hosts
- ansible.posix.synchronize:
- src: ../src/debian/python3-setup-ovs
- dest: /tmp/src
- rsync_opts:
- - "--chown=root:root"
-- name: Install python3-setup-ovs
- command:
- cmd: /usr/bin/python3 setup.py install
- chdir: /tmp/src/python3-setup-ovs
-- name: Copy seapath-config_ovs.service
- ansible.builtin.copy:
- src: ../src/debian/seapath-config_ovs.service
- dest: /etc/systemd/system/seapath-config_ovs.service
- mode: '0644'
- register: seapathconfigovs
-- name: daemon-reload seapath-config_ovs
- ansible.builtin.service:
- daemon_reload: yes
- when: seapathconfigovs.changed
-- name: enable seapath-config_ovs.service
- ansible.builtin.systemd:
- name: seapath-config_ovs.service
- enabled: yes
-
-- name: Synchronization of src vm_manager on the control machine to dest on the remote hosts
- ansible.posix.synchronize:
- src: ../src/debian/vm_manager
- dest: /tmp/src
- rsync_opts:
- - "--chown=root:root"
-- name: Install vm_manager
- command:
- cmd: /usr/bin/python3 setup.py install
- chdir: /tmp/src/vm_manager
-- name: Create a symbolic link
- ansible.builtin.file:
- src: /usr/local/bin/vm_manager_cmd.py
- dest: /usr/local/bin/vm-mgr
- state: link
-
-- name: Copy consolevm.sh
- template:
- src: ../src/debian/consolevm.sh.j2
- dest: /usr/local/bin/consolevm
- mode: '0755'
-
-- name: Synchronization of backup-restore folder on the control machine to dest on the remote hosts
- ansible.posix.synchronize:
- src: ../src/debian/backup-restore/
- dest: /usr/local/bin/
- rsync_opts:
- - "--chown=root:root"
- - "--exclude=*.j2"
-- name: Copy backup-restore templates
- template:
- src: "{{ item }}"
- dest: /usr/local/bin/{{ item | basename | regex_replace('\.j2$', '') }}
- mode: '0755'
- with_fileglob:
- - ../src/debian/backup-restore/*.j2
-- name: create /etc/backup-restore.conf file
- file:
- path: "/etc/backup-restore.conf"
- state: touch
- mode: 0644
- owner: root
- group: root
-- name: check configuration of backup-restore.sh tool (remote_shell)
- shell: 'grep -c "^remote_shell=" /etc/backup-restore.conf || true'
- register: check_remote_shell
-- name: add default configuration of backup-restore.sh tool (remote_shell)
- lineinfile:
- dest: /etc/backup-restore.conf
- regexp: "^remote_shell="
- line: "remote_shell=\"ssh\""
- state: present
- when: check_remote_shell.stdout == "0"
-
-- name: create /usr/lib/ocf/resource.d/seapath on hosts
- file:
- path: /usr/lib/ocf/resource.d/seapath
- state: directory
- mode: '0755'
-
-- name: Copy Pacemaker Seapath Resource-Agent files
- ansible.posix.synchronize:
- src: ../src/debian/pacemaker_ra/
- dest: /usr/lib/ocf/resource.d/seapath/
- rsync_opts:
- - "--chmod=F755"
- - "--chown=root:root"
-
-- name: Copy chrony-wait.service
- template:
- src: ../src/debian/chrony-wait.service.j2
- dest: /etc/systemd/system/chrony-wait.service
- owner: root
- group: root
- mode: '0644'
- register: chronywait
-- name: daemon-reload chrony-wait.service
- ansible.builtin.service:
- daemon_reload: yes
- when: chronywait.changed
-- name: enable chrony-wait.service
- ansible.builtin.systemd:
- name: chrony-wait.service
- enabled: yes
-
-- name: Create libvirtd.service.d directory
- file:
- path: /etc/systemd/system/libvirtd.service.d/
- state: directory
- owner: root
- group: root
- mode: 0755
-- name: Create pacemaker.service.d directory
- file:
- path: /etc/systemd/system/pacemaker.service.d/
- state: directory
- owner: root
- group: root
- mode: 0755
-- name: Copy pacemaker.service drop-in
- template:
- src: ../src/debian/pacemaker_override.conf.j2
- dest: /etc/systemd/system/pacemaker.service.d/override.conf
- owner: root
- group: root
- mode: 0644
- notify: daemon-reload
- register: pacemaker_corosync
-- name: Get Pacemaker service Status
- ansible.builtin.systemd:
- name: "pacemaker.service"
- register: pacemaker_service_status
-- name: disable pacemaker (reinstall step 1/2)
- ansible.builtin.systemd:
- name: pacemaker.service
- enabled: no
- when: pacemaker_corosync.changed and pacemaker_service_status.status.UnitFileState == "enabled"
-- name: enable pacemaker (reinstall step 2/2)
- ansible.builtin.systemd:
- name: pacemaker.service
- enabled: yes
- when: pacemaker_corosync.changed and pacemaker_service_status.status.UnitFileState == "enabled"
-
-- name: Add extra modules to the kernel
- block:
- - name: create extra modules conf file
- ansible.builtin.file:
- path: /etc/modules-load.d/extra_modules.conf
- owner: root
- group: root
- mode: 0751
- state: touch
- - name: add extra modules to conf file
- lineinfile:
- dest: /etc/modules-load.d/extra_modules.conf
- state: present
- regexp: "^{{ item }}$"
- line: "{{ item }}"
- with_items: "{{ extra_kernel_modules | default([]) }}"
-
-- name: Add admin user to libvirt group
- user:
- name: "{{ admin_user }}"
- groups: libvirt
- append: yes
-
-
-- name: Creating libvirt user with libvirtd permissions
- user: name=libvirt
- group=libvirt
- shell=/bin/false
-
-- name: add br_netfilter to /etc/modules-load.d
- ansible.builtin.copy:
- src: ../src/centos/modules/netfilter.conf
- dest: /etc/modules-load.d/netfilter.conf
- owner: root
- group: root
- mode: 0751
-
-- name: lineinfile in hosts file for logstash-seapath
- lineinfile:
- dest: /etc/hosts
- regexp: '.* logstash-seapath$'
- line: "{{ logstash_server_ip }} logstash-seapath"
- state: present
- when: logstash_server_ip is defined
-
-- name: Make libvirt use the "machine-id" way to determine host UUID
- lineinfile:
- dest: /etc/libvirt/libvirtd.conf
- regexp: "^#?host_uuid_source ="
- line: "host_uuid_source = \"machine-id\""
- state: present
-- name: restart libvirtd
- ansible.builtin.systemd:
- name: libvirtd.service
- state: restarted
-
-- name: enable virtsecretd
- ansible.builtin.systemd:
- name: virtsecretd.service
- enabled: yes
- state: started
-
-
-- name: enable docker.service
- ansible.builtin.systemd:
- name: docker.service
-- name: "add initramfs-tools scripts: script file (LVM rebooter and log handling)"
- ansible.builtin.copy:
- src: ../src/debian/initramfs-tools/scripts/
- dest: /etc/initramfs-tools/scripts/
- mode: '0755'
- register: initramfs_tools_scripts
-
-- name: "get the /var/log/ device"
- command: "findmnt -n -o SOURCE --target /var/log"
- register: varlog_dev
-
-- name: "set_fact /var/log/ device"
- set_fact:
- lvm_rebooter_log_device: "{{ varlog_dev.stdout }}"
-
-- name: "get the /var/log/ relative path"
- shell: "realpath --relative-to=$(findmnt -n -o TARGET --target /var/log/) /var/log"
- register: varlog_path
-
-- name: "set_fact /var/log/ relative path"
- set_fact:
- lvm_rebooter_log_path: "{{ varlog_path.stdout }}"
-
-- name: "Copy rebooter.conf"
- template:
- src: ../src/debian/initramfs-tools/conf.d/rebooter.conf.j2
- dest: /etc/dracut.conf.d/rebooter.conf
-
-- name: "configure initramfs-tools to use busybox"
- lineinfile:
- dest: /etc/dracut.conf
- regexp: "^#?BUSYBOX="
- line: "BUSYBOX=y"
- state: present
- register: initramfs_busybox
-
-- name: "add udev rules for lvm2 limitation"
- ansible.builtin.copy:
- src: ../src/debian/69-lvm.rules
- dest: /etc/udev/rules.d/69-lvm.rules
- mode: '0644'
- when: ansible_distribution == 'Debian' and ansible_distribution_version | int >= 12
- register: udevlvm
-- name: "rebuild initramfs if necessary"
- command:
- cmd: /usr/bin/dracut --regenerate-all --force
- when: udevlvm.changed or initramfs_tools_scripts.changed or initramfs_busybox.changed
-
-- name: "add rbd type to lvm.conf"
- ansible.builtin.lineinfile:
- path: /etc/lvm/lvm.conf
- insertafter: 'devices {'
- line: " types = [ \"rbd\", 1024 ]"
- state: present
-
-- name: "Configure firewalld for ceph"
- block:
- - name: "Configure firewalld for ceph monitor"
- ansible.posix.firewalld:
- port: 3300/tcp
- permanent: true
- state: enabled
- - name: "Configure firewalld for ceph monitor legacy v1 port"
- ansible.posix.firewalld:
- port: 6789/tcp
- permanent: true
- state: enabled
- - name: "Configure firewalld for ceph OSD"
- ansible.posix.firewalld:
- port: 6800-7300/tcp
- permanent: true
- state: enabled
-
-- name: "Configure firewalld for high-availability services"
- ansible.posix.firewalld:
- service: high-availability
- permanent: true
- state: enabled
diff --git a/roles/centos/tasks/main.yml b/roles/centos/tasks/main.yml
index ad4799906..6f77852be 100644
--- a/roles/centos/tasks/main.yml
+++ b/roles/centos/tasks/main.yml
@@ -1,3 +1,4 @@
+# Copyright (C) 2024 RTE
# Copyright (C) 2024 Red Hat, Inc.
# SPDX-License-Identifier: Apache-2.0
@@ -25,7 +26,7 @@
mode: '0755'
- name: Copy syslog-ng conf file
template:
- src: ../templates/syslog-ng.conf.j2
+ src: syslog-ng.conf.j2
dest: /etc/syslog-ng/syslog-ng.conf
mode: '0644'
notify: restart syslog-ng
@@ -81,7 +82,7 @@
- name: Copy journald conf file
ansible.builtin.copy:
- src: ../src/debian/journald.conf
+ src: journald.conf
dest: /etc/systemd/journald.conf
mode: '0644'
register: journaldconf
@@ -127,7 +128,7 @@
- name: Copy sysctl rules
ansible.builtin.copy:
- src: ../src/debian/sysctl/{{ item }}
+ src: sysctl/{{ item }}
dest: /etc/sysctl.d/{{ item }}
mode: '0644'
with_items:
diff --git a/roles/centos/templates/syslog-ng.conf.j2 b/roles/centos/templates/syslog-ng.conf.j2
new file mode 100644
index 000000000..23cb14a05
--- /dev/null
+++ b/roles/centos/templates/syslog-ng.conf.j2
@@ -0,0 +1,86 @@
+@version: 3.27
+# Copyright (C) 2022, RTE (http://www.rte-france.com)
+# SPDX-License-Identifier: Apache-2.0
+
+# First, set some global options.
+options {
+ chain_hostnames(off);
+ flush_lines(0);
+ use_dns(no);
+ dns_cache(no);
+ use_fqdn(no);
+ owner("root");
+ group("adm");
+ perm(0640);
+ stats_freq(0);
+ bad_hostname("^gconfd$");
+};
+
+########################
+# Sources
+########################
+# This is the default behavior of sysklogd package
+# Logs may come from unix stream, but not from another machine.
+#
+source s_src {
+ systemd_journal();
+ internal();
+ file("/proc/kmsg" program_override("kernel"));
+};
+
+########################
+# Destinations
+########################
+destination d_syslog { file("/var/log/syslog-ng/syslog.local"); };
+
+{% if syslog_server_ip is defined %}
+# Network destination
+# mem-buf-size is set to 16384000 ~= 15.6MB (TODO confirm: comment previously claimed 163840000 ~= 156MB as the default)
+# disk-buf-size is set to 107374182 ~= 102MB (TODO confirm: comment previously claimed 1073741824 = 1GB)
+destination d_net {
+ network(
+ "{{ syslog_server_ip }}"
+{% if syslog_tls_ca is defined and syslog_tls_key is defined %}
+ port({{ syslog_tls_port | default(6514) }})
+ transport("tls")
+ tls(
+ key-file("/etc/syslog-ng/cert.d/clientkey.pem")
+ cert-file("/etc/syslog-ng/cert.d/clientcert.pem")
+ ca-dir("/etc/syslog-ng/ca.d")
+ )
+{% else %}
+ port({{ syslog_tcp_port | default(601) }})
+ transport("tcp")
+{% endif %}
+ time_zone("UTC")
+ disk-buffer(
+ mem-buf-size(16384000)
+ disk-buf-size(107374182)
+ reliable(yes)
+ dir("/var/log/syslog-ng")
+ )
+ );
+ };
+{% endif %}
+
+########################
+# Log paths
+########################
+{% if syslog_local is defined %}
+log {
+ source(s_src);
+ destination(d_syslog);
+};
+{% endif %}
+{% if syslog_server_ip is defined %}
+log {
+ source(s_src);
+{% if ansible_distribution == 'Debian' and ansible_distribution_version | int < 12 %}
+ if (program("libvirtd")) {
+ rewrite { set-facility("daemon"); };
+ };
+{% endif %}
+ destination(d_net);
+};
+{% endif %}
+
diff --git a/src/centos/modules/sriov_driver.conf b/roles/centos_hypervisor/files/modules/sriov_driver.conf
similarity index 100%
rename from src/centos/modules/sriov_driver.conf
rename to roles/centos_hypervisor/files/modules/sriov_driver.conf
diff --git a/src/centos/modules/vhost_vsock.conf b/roles/centos_hypervisor/files/modules/vhost_vsock.conf
similarity index 100%
rename from src/centos/modules/vhost_vsock.conf
rename to roles/centos_hypervisor/files/modules/vhost_vsock.conf
diff --git a/roles/centos_hypervisor/files/ovs-vswitchd_override.conf b/roles/centos_hypervisor/files/ovs-vswitchd_override.conf
new file mode 100644
index 000000000..11fdc19e3
--- /dev/null
+++ b/roles/centos_hypervisor/files/ovs-vswitchd_override.conf
@@ -0,0 +1,2 @@
+[Service]
+Slice=ovs.slice
diff --git a/roles/centos_hypervisor/files/tuned.conf.j2 b/roles/centos_hypervisor/files/tuned.conf.j2
new file mode 100644
index 000000000..78b706fc7
--- /dev/null
+++ b/roles/centos_hypervisor/files/tuned.conf.j2
@@ -0,0 +1,20 @@
+[main]
+include=realtime-virtual-host
+summary=Seapath profile for RT host (Seapath Rt Host = SRH)
+
+[variables]
+isolated_cores={{ isolcpus | default('') }}
+non_isolated_cores_expanded=${f:cpulist_unpack:${non_isolated_cores}}
+
+[sysctl]
+kernel.printk=3 1 1 7
+
+[sysfs]
+/sys/module/kvm/parameters/halt_poll_ns = 0
+
+[bootloader]
+cmdline_srh=+processor.max_cstate=1 intel_idle.max_cstate=1 cpufreq.default_governor=performance rcu_nocb_poll
+
+[scheduler]
+# for i in `pgrep rcuc` ; do grep Cpus_allowed_list /proc/$i/status ; done
+group.rcuc=0:f:10:*:^\[rcuc
diff --git a/roles/centos/hypervisor/tasks/main.yml b/roles/centos_hypervisor/tasks/main.yml
similarity index 77%
rename from roles/centos/hypervisor/tasks/main.yml
rename to roles/centos_hypervisor/tasks/main.yml
index 0b9a20e48..9a3f0859f 100644
--- a/roles/centos/hypervisor/tasks/main.yml
+++ b/roles/centos_hypervisor/tasks/main.yml
@@ -1,3 +1,4 @@
+# Copyright (C) 2024 RTE
# Copyright (C) 2024 Red Hat, Inc.
# SPDX-License-Identifier: Apache-2.0
@@ -14,7 +15,7 @@
- name: add vhost_vsock to /etc/modules-load.d
ansible.builtin.copy:
- src: ../src/centos/modules/vhost_vsock.conf
+ src: modules/vhost_vsock.conf
dest: /etc/modules-load.d/vhost_vsock.conf
owner: root
group: root
@@ -22,7 +23,7 @@
- name: add sriov driver to /etc/modules-load.d
ansible.builtin.copy:
- src: ../src/centos/modules/sriov_driver.conf
+ src: modules/sriov_driver.conf
dest: /etc/modules-load.d/sriov_driver.conf
owner: root
group: root
@@ -36,7 +37,7 @@
- name: add sriov sysfs rules
template:
- src: templates/sriov.conf.j2
+ src: sriov.conf.j2
dest: /etc/tmpfiles.d/sriov.conf
mode: '0644'
with_items: "{{ sriov | dict2items }}"
@@ -44,7 +45,7 @@
register: sriov_tmpfiles
- name: Copy sysfs.d cpumask
template:
- src: templates/tmpfiles-workqueue_cpumask.conf.j2
+ src: tmpfiles-workqueue_cpumask.conf.j2
dest: /etc/tmpfiles.d/tmpfiles-workqueue_cpumask.conf
mode: '0644'
register: tmpfiles_cpumask
@@ -100,7 +101,7 @@
- "machine"
- name: create systemd slices override (files)
template:
- src: ../templates/systemd_slice_override.j2
+ src: systemd_slice_override.j2
dest: /etc/systemd/system.control/{{ item.name }}.slice.d/50-AllowedCPUs.conf
owner: root
group: root
@@ -125,7 +126,7 @@
- name: create systemd slices
template:
- src: ../templates/systemd_slice.j2
+ src: systemd_slice.j2
dest: /etc/systemd/system/{{ item.name }}.slice
owner: root
group: root
@@ -199,7 +200,7 @@
- name: copy seapath-rt-host tuned profile conf
template:
- src: ../src/tuned/tuned.conf.j2
+ src: tuned.conf.j2
dest: /etc/tuned/seapath-rt-host/tuned.conf
group: root
owner: root
@@ -236,7 +237,7 @@
mode: 0755
- name: Copy ovs-vswitchd.service drop-in
ansible.builtin.copy:
- src: ../src/debian/ovs-vswitchd_override.conf
+ src: ovs-vswitchd_override.conf
dest: /etc/systemd/system/ovs-vswitchd.service.d/override.conf
owner: root
group: root
@@ -249,67 +250,6 @@
name: ovs-vswitchd
when: ovsvswitchd.changed
-
-- name: Copy ptp_status executable files
- ansible.builtin.copy:
- src: ../src/debian/ptpstatus/ptpstatus_script/ptpstatus.sh
- dest: /usr/local/bin/ptpstatus.sh
- mode: '0755'
-- name: Copy ptp_vsock executable files
- ansible.builtin.copy:
- src: ../src/debian/ptp_vsock.py
- dest: /usr/local/bin/ptp_vsock.py
- mode: '0755'
-
-- name: Copy ptp_status.service
- ansible.builtin.copy:
- src: ../src/debian/ptpstatus/ptpstatus.service
- dest: /etc/systemd/system/ptpstatus.service
- mode: '0644'
- register: ptpstatus
-- name: Copy ptp_vsock.service
- ansible.builtin.copy:
- src: ../src/debian/ptp_vsock.service
- dest: /etc/systemd/system/ptp_vsock.service
- mode: '0644'
- register: ptpvsock
-- name: daemon-reload ptp status
- ansible.builtin.service:
- daemon_reload: yes
- when: ptpstatus.changed or ptpvsock.changed
-- name: enable ptpstatus.service
- ansible.builtin.systemd:
- name: ptpstatus.service
- enabled: yes
- state: started
-- name: enable ptp_vsock.service
- ansible.builtin.systemd:
- name: ptp_vsock.service
- enabled: yes
- state: started
-
-- name: Create conntrackd.service.d directory
- file:
- path: /etc/systemd/system/conntrackd.service.d/
- state: directory
- owner: root
- group: root
- mode: 0755
-- name: Copy conntrackd.service drop-in
- ansible.builtin.copy:
- src: ../src/debian/conntrackd_override.conf
- dest: /etc/systemd/system/conntrackd.service.d/override.conf
- owner: root
- group: root
- mode: 0644
- register: conntrackd
-- name: Restart conntrackd
- ansible.builtin.systemd:
- state: restarted
- daemon_reload: yes
- name: conntrackd
- when: conntrackd.changed
-
- name: enable libvirtd.service
ansible.builtin.systemd:
name: libvirtd.service
diff --git a/roles/centos/hypervisor/templates/sriov.conf.j2 b/roles/centos_hypervisor/templates/sriov.conf.j2
similarity index 100%
rename from roles/centos/hypervisor/templates/sriov.conf.j2
rename to roles/centos_hypervisor/templates/sriov.conf.j2
diff --git a/roles/centos_hypervisor/templates/sriov_network_pool.xml.j2 b/roles/centos_hypervisor/templates/sriov_network_pool.xml.j2
new file mode 100644
index 000000000..3f726741e
--- /dev/null
+++ b/roles/centos_hypervisor/templates/sriov_network_pool.xml.j2
@@ -0,0 +1,6 @@
+
+{{ sriov_network_pool_name }}
+
+
+
+
diff --git a/roles/centos_hypervisor/templates/systemd_slice.j2 b/roles/centos_hypervisor/templates/systemd_slice.j2
new file mode 100644
index 000000000..19e6da826
--- /dev/null
+++ b/roles/centos_hypervisor/templates/systemd_slice.j2
@@ -0,0 +1,7 @@
+[Unit]
+Description={{ item.description }}
+Before=slices.target
+Wants={{ item.wants }}
+
+[Slice]
+AllowedCPUs={{ item.allowedcpus }}
diff --git a/roles/centos_hypervisor/templates/systemd_slice_override.j2 b/roles/centos_hypervisor/templates/systemd_slice_override.j2
new file mode 100644
index 000000000..8e8e245e5
--- /dev/null
+++ b/roles/centos_hypervisor/templates/systemd_slice_override.j2
@@ -0,0 +1,4 @@
+# This is a drop-in unit file extension, created via "systemctl set-property"
+# or an equivalent operation. Do not edit.
+[Slice]
+AllowedCPUs={{ item.allowedcpus }}
diff --git a/roles/centos/hypervisor/templates/tmpfiles-workqueue_cpumask.conf.j2 b/roles/centos_hypervisor/templates/tmpfiles-workqueue_cpumask.conf.j2
similarity index 100%
rename from roles/centos/hypervisor/templates/tmpfiles-workqueue_cpumask.conf.j2
rename to roles/centos_hypervisor/templates/tmpfiles-workqueue_cpumask.conf.j2
diff --git a/roles/centos_physical_machine/files/00-bridge_nf_call.conf b/roles/centos_physical_machine/files/00-bridge_nf_call.conf
new file mode 100644
index 000000000..3fe3e5277
--- /dev/null
+++ b/roles/centos_physical_machine/files/00-bridge_nf_call.conf
@@ -0,0 +1,3 @@
+net.bridge.bridge-nf-call-arptables = 0
+net.bridge.bridge-nf-call-ip6tables = 0
+net.bridge.bridge-nf-call-iptables = 0
diff --git a/roles/centos_physical_machine/files/69-lvm.rules b/roles/centos_physical_machine/files/69-lvm.rules
new file mode 100644
index 000000000..6544dccf9
--- /dev/null
+++ b/roles/centos_physical_machine/files/69-lvm.rules
@@ -0,0 +1,94 @@
+# Copyright (C) 2012,2021 Red Hat, Inc. All rights reserved.
+#
+# This file is part of LVM.
+#
+# This rule requires blkid to be called on block devices before so only devices
+# used as LVM PVs are processed (ID_FS_TYPE="LVM2_member").
+
+SUBSYSTEM!="block", GOTO="lvm_end"
+
+
+ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="lvm_end"
+
+# Only process devices already marked as a PV - this requires blkid to be called before.
+ENV{ID_FS_TYPE}!="LVM2_member", GOTO="lvm_end"
+ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="lvm_end"
+ACTION=="remove", GOTO="lvm_end"
+
+# Create /dev/disk/by-id/lvm-pv-uuid- symlink for each PV
+ENV{ID_FS_UUID_ENC}=="?*", SYMLINK+="disk/by-id/lvm-pv-uuid-$env{ID_FS_UUID_ENC}"
+
+# If the PV is a special device listed below, scan only if the device is
+# properly activated. These devices are not usable after an ADD event,
+# but they require an extra setup and they are ready after a CHANGE event.
+# Also support coldplugging with ADD event but only if the device is already
+# properly activated.
+# This logic should be eventually moved to rules where those particular
+# devices are processed primarily (MD and loop).
+
+# DM device:
+KERNEL!="dm-[0-9]*", GOTO="next"
+ENV{DM_UDEV_PRIMARY_SOURCE_FLAG}=="1", ENV{DM_ACTIVATION}=="1", GOTO="lvm_scan"
+GOTO="lvm_end"
+
+# MD device:
+LABEL="next"
+KERNEL!="md[0-9]*", GOTO="next"
+IMPORT{db}="LVM_MD_PV_ACTIVATED"
+ACTION=="add", ENV{LVM_MD_PV_ACTIVATED}=="1", GOTO="lvm_scan"
+ACTION=="change", ENV{LVM_MD_PV_ACTIVATED}!="1", TEST=="md/array_state", ENV{LVM_MD_PV_ACTIVATED}="1", GOTO="lvm_scan"
+ACTION=="add", KERNEL=="md[0-9]*p[0-9]*", GOTO="lvm_scan"
+ENV{LVM_MD_PV_ACTIVATED}!="1", ENV{SYSTEMD_READY}="0"
+GOTO="lvm_end"
+
+# Loop device:
+LABEL="next"
+KERNEL!="loop[0-9]*", GOTO="next"
+ACTION=="add", ENV{LVM_LOOP_PV_ACTIVATED}=="1", GOTO="lvm_scan"
+ACTION=="change", ENV{LVM_LOOP_PV_ACTIVATED}!="1", TEST=="loop/backing_file", ENV{LVM_LOOP_PV_ACTIVATED}="1", GOTO="lvm_scan"
+ENV{LVM_LOOP_PV_ACTIVATED}!="1", ENV{SYSTEMD_READY}="0"
+GOTO="lvm_end"
+
+LABEL="next"
+ACTION!="add", GOTO="lvm_end"
+
+LABEL="lvm_scan"
+
+ENV{SYSTEMD_READY}="1"
+
+# pvscan will check if this device completes a VG,
+# i.e. all PVs in the VG are now present with the
+# arrival of this PV. If so, it prints to stdout:
+# LVM_VG_NAME_COMPLETE='foo'
+#
+# When the VG is complete it can be activated, so
+# vgchange -aay is run. It is run via
+# systemd since it can take longer to run than
+# udev wants to block when processing rules.
+# (if there are hundreds of LVs to activate,
+# the vgchange can take many seconds.)
+#
+# pvscan only reads the single device specified,
+# and uses temp files under /run/lvm to check if
+# other PVs in the VG are present.
+#
+# If event_activation=0 in lvm.conf, this pvscan
+# (using checkcomplete) will do nothing, so that
+# no event-based autoactivation will happen.
+#
+# TODO: adjust the output of vgchange -aay so that
+# it's better suited to appearing in the journal.
+
+IMPORT{program}="/sbin/lvm pvscan --cache --listvg --checkcomplete --vgonline --autoactivation event --udevoutput --journal=output $env{DEVNAME}"
+TEST!="/run/systemd/system", GOTO="lvm_direct_vgchange"
+
+ENV{LVM_VG_NAME_COMPLETE}=="?*", RUN+="/usr/bin/systemd-run --no-block --property DefaultDependencies=no --unit lvm-activate-$env{LVM_VG_NAME_COMPLETE} /sbin/lvm vgchange -aay --autoactivation event $env{LVM_VG_NAME_COMPLETE}"
+GOTO="lvm_end"
+
+LABEL="lvm_direct_vgchange"
+ENV{LVM_VG_NAME_COMPLETE}=="?*", RUN+="/sbin/lvm vgchange -aay --autoactivation event $env{LVM_VG_NAME_COMPLETE}"
+TEST!="/run/initramfs", GOTO="lvm_end"
+ENV{LVM_VG_NAME_INCOMPLETE}=="?*", RUN+="/sbin/lvm vgchange --sysinit -aay --activation degraded $env{LVM_VG_NAME_INCOMPLETE}"
+GOTO="lvm_end"
+
+LABEL="lvm_end"
diff --git a/src/centos/modules/netfilter.conf b/roles/centos_physical_machine/files/modules/netfilter.conf
similarity index 100%
rename from src/centos/modules/netfilter.conf
rename to roles/centos_physical_machine/files/modules/netfilter.conf
diff --git a/roles/centos_physical_machine/files/pacemaker_ra/VirtualDomain b/roles/centos_physical_machine/files/pacemaker_ra/VirtualDomain
new file mode 100755
index 000000000..3bbdc56f5
--- /dev/null
+++ b/roles/centos_physical_machine/files/pacemaker_ra/VirtualDomain
@@ -0,0 +1,1201 @@
+#!/bin/sh
+#
+# Support: users@clusterlabs.org
+# License: GNU General Public License (GPL)
+#
+# Resource Agent for domains managed by the libvirt API.
+# Requires a running libvirt daemon (libvirtd).
+#
+# (c) 2008-2010 Florian Haas, Dejan Muhamedagic,
+# and Linux-HA contributors
+#
+# usage: $0 {start|stop|status|monitor|migrate_to|migrate_from|meta-data|validate-all}
+#
+#######################################################################
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_config_default=""
+OCF_RESKEY_migration_transport_default=""
+OCF_RESKEY_migration_downtime_default=0
+OCF_RESKEY_migration_speed_default=0
+OCF_RESKEY_migration_network_suffix_default=""
+OCF_RESKEY_force_stop_default=0
+OCF_RESKEY_monitor_scripts_default=""
+OCF_RESKEY_autoset_utilization_cpu_default="true"
+OCF_RESKEY_autoset_utilization_host_memory_default="true"
+OCF_RESKEY_autoset_utilization_hv_memory_default="true"
+OCF_RESKEY_unset_utilization_cpu_default="false"
+OCF_RESKEY_unset_utilization_host_memory_default="false"
+OCF_RESKEY_unset_utilization_hv_memory_default="false"
+OCF_RESKEY_migrateport_default=$(( 49152 + $(ocf_maybe_random) % 64 ))
+OCF_RESKEY_CRM_meta_timeout_default=90000
+OCF_RESKEY_save_config_on_stop_default=false
+OCF_RESKEY_sync_config_on_stop_default=false
+OCF_RESKEY_snapshot_default=""
+OCF_RESKEY_backingfile_default=""
+OCF_RESKEY_stateless_default="false"
+OCF_RESKEY_copyindirs_default=""
+OCF_RESKEY_shutdown_mode_default=""
+OCF_RESKEY_start_resources_default="false"
+OCF_RESKEY_seapath_default="false"
+
+: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}}
+: ${OCF_RESKEY_migration_transport=${OCF_RESKEY_migration_transport_default}}
+: ${OCF_RESKEY_migration_downtime=${OCF_RESKEY_migration_downtime_default}}
+: ${OCF_RESKEY_migration_speed=${OCF_RESKEY_migration_speed_default}}
+: ${OCF_RESKEY_migration_network_suffix=${OCF_RESKEY_migration_network_suffix_default}}
+: ${OCF_RESKEY_force_stop=${OCF_RESKEY_force_stop_default}}
+: ${OCF_RESKEY_monitor_scripts=${OCF_RESKEY_monitor_scripts_default}}
+: ${OCF_RESKEY_autoset_utilization_cpu=${OCF_RESKEY_autoset_utilization_cpu_default}}
+: ${OCF_RESKEY_autoset_utilization_host_memory=${OCF_RESKEY_autoset_utilization_host_memory_default}}
+: ${OCF_RESKEY_autoset_utilization_hv_memory=${OCF_RESKEY_autoset_utilization_hv_memory_default}}
+: ${OCF_RESKEY_unset_utilization_cpu=${OCF_RESKEY_unset_utilization_cpu_default}}
+: ${OCF_RESKEY_unset_utilization_host_memory=${OCF_RESKEY_unset_utilization_host_memory_default}}
+: ${OCF_RESKEY_unset_utilization_hv_memory=${OCF_RESKEY_unset_utilization_hv_memory_default}}
+: ${OCF_RESKEY_migrateport=${OCF_RESKEY_migrateport_default}}
+: ${OCF_RESKEY_CRM_meta_timeout=${OCF_RESKEY_CRM_meta_timeout_default}}
+: ${OCF_RESKEY_save_config_on_stop=${OCF_RESKEY_save_config_on_stop_default}}
+: ${OCF_RESKEY_sync_config_on_stop=${OCF_RESKEY_sync_config_on_stop_default}}
+: ${OCF_RESKEY_snapshot=${OCF_RESKEY_snapshot_default}}
+: ${OCF_RESKEY_backingfile=${OCF_RESKEY_backingfile_default}}
+: ${OCF_RESKEY_stateless=${OCF_RESKEY_stateless_default}}
+: ${OCF_RESKEY_copyindirs=${OCF_RESKEY_copyindirs_default}}
+: ${OCF_RESKEY_shutdown_mode=${OCF_RESKEY_shutdown_mode_default}}
+: ${OCF_RESKEY_start_resources=${OCF_RESKEY_start_resources_default}}
+: ${OCF_RESKEY_seapath=${OCF_RESKEY_seapath_default}}
+
+if ocf_is_true ${OCF_RESKEY_sync_config_on_stop}; then
+ OCF_RESKEY_save_config_on_stop="true"
+fi
+#######################################################################
+
+## I'd very much suggest to make this RA use bash,
+## and then use magic $SECONDS.
+## But for now:
+NOW=$(date +%s)
+
+usage() {
+ echo "usage: $0 {start|stop|status|monitor|migrate_to|migrate_from|meta-data|validate-all}"
+}
+
+VirtualDomain_meta_data() {
+ cat <
+
+
+1.0
+
+
+Resource agent for a virtual domain (a.k.a. domU, virtual machine,
+virtual environment etc., depending on context) managed by libvirtd.
+
+Manages virtual domains through the libvirt virtualization framework
+
+
+
+
+
+Absolute path to the libvirt configuration file,
+for this virtual domain.
+
+Virtual domain configuration file
+
+
+
+
+
+Hypervisor URI to connect to. See the libvirt documentation for
+details on supported URI formats. The default is system dependent.
+Determine the system's default uri by running 'virsh --quiet uri'.
+
+Hypervisor URI
+
+
+
+
+
+Always forcefully shut down ("destroy") the domain on stop. The default
+behavior is to resort to a forceful shutdown only after a graceful
+shutdown attempt has failed. You should only set this to true if
+your virtual domain (or your virtualization backend) does not support
+graceful shutdown.
+
+Always force shutdown on stop
+
+
+
+
+
+Transport used to connect to the remote hypervisor while
+migrating. Please refer to the libvirt documentation for details on
+transports available. If this parameter is omitted, the resource will
+use libvirt's default transport to connect to the remote hypervisor.
+
+Remote hypervisor transport
+
+
+
+
+
+The username will be used in the remote libvirt remoteuri/migrateuri. No user will be
+given (which means root) in the username if omitted
+
+If remoteuri is set, migration_user will be ignored.
+
+Remote username for the remoteuri
+
+
+
+
+
+Define max downtime during live migration in milliseconds
+
+Live migration downtime
+
+
+
+
+
+Define live migration speed per resource in MiB/s
+
+Live migration speed
+
+
+
+
+
+Use a dedicated migration network. The migration URI is composed by
+adding this parameters value to the end of the node name. If the node
+name happens to be an FQDN (as opposed to an unqualified host name),
+insert the suffix immediately prior to the first period (.) in the FQDN.
+At the moment Qemu/KVM and Xen migration via a dedicated network is supported.
+
+Note: Be sure this composed host name is locally resolvable and the
+associated IP is reachable through the favored network. This suffix will
+be added to the remoteuri and migrateuri parameters.
+
+See also the migrate_options parameter below.
+
+Migration network host name suffix
+
+
+
+
+
+You can also specify here if the calculated migrate URI is unsuitable for your
+environment.
+
+If migrateuri is set then migration_network_suffix, migrateport and
+--migrateuri in migrate_options are effectively ignored. Use "%n" as the
+placeholder for the target node name.
+
+Please refer to the libvirt documentation for details on guest
+migration.
+
+Custom migrateuri for migration state transfer
+
+
+
+
+
+Extra virsh options for the guest live migration. You can also specify
+here --migrateuri if the calculated migrate URI is unsuitable for your
+environment. If --migrateuri is set then migration_network_suffix
+and migrateport are effectively ignored. Use "%n" as the placeholder
+for the target node name.
+
+Please refer to the libvirt documentation for details on guest
+migration.
+
+live migrate options
+
+
+
+
+
+To additionally monitor services within the virtual domain, add this
+parameter with a list of scripts to monitor.
+
+Note: when monitor scripts are used, the start and migrate_from operations
+will complete only when all monitor scripts have completed successfully.
+Be sure to set the timeout of these operations to accommodate this delay.
+
+space-separated list of monitor scripts
+
+
+
+
+
+If set true, the agent will detect the number of domainU's vCPUs from virsh, and put it
+into the CPU utilization of the resource when the monitor is executed.
+
+Enable auto-setting the CPU utilization of the resource
+
+
+
+
+
+If set true, the agent will detect the number of *Max memory* from virsh, and put it
+into the host_memory utilization of the resource when the monitor is executed.
+
+Enable auto-setting the host_memory utilization of the resource
+
+
+
+
+
+If set true, the agent will detect the number of *Max memory* from virsh, and put it
+into the hv_memory utilization of the resource when the monitor is executed.
+
+Enable auto-setting the hv_memory utilization of the resource
+
+
+
+
+
+If set true then the agent will remove the cpu utilization resource when the monitor
+is executed.
+
+Enable auto-removing the CPU utilization of the resource
+
+
+
+
+
+If set true then the agent will remove the host_memory utilization resource when the monitor
+is executed.
+
+Enable auto-removing the host_memory utilization of the resource
+
+
+
+
+
+If set true then the agent will remove the hv_memory utilization resource when the monitor
+is executed.
+
+Enable auto-removing the hv_memory utilization of the resource
+
+
+
+
+
+This port will be used in the qemu migrateuri. If unset, the port will be a random highport.
+
+Port for migrateuri
+
+
+
+
+
+Use this URI as virsh connection URI to communicate with a remote hypervisor.
+
+If remoteuri is set then migration_user and migration_network_suffix are
+effectively ignored. Use "%n" as the placeholder for the target node name.
+
+Please refer to the libvirt documentation for details on guest
+migration.
+
+Custom remoteuri to communicate with a remote hypervisor
+
+
+
+
+
+Changes to a running VM's config are normally lost on stop.
+This parameter instructs the RA to save the configuration back to the xml file provided in the "config" parameter.
+
+Save running VM's config back to its config file
+
+
+
+
+
+Setting this automatically enables save_config_on_stop.
+When enabled this parameter instructs the RA to
+call csync2 -x to synchronize the file to all nodes.
+csync2 must be properly set up for this to work.
+
+Save running VM's config back to its config file
+
+
+
+
+
+Path to the snapshot directory where the virtual machine image will be stored. When this
+parameter is set, the virtual machine's RAM state will be saved to a file in the snapshot
+directory when stopped. If on start a state file is present for the domain, the domain
+will be restored to the same state it was in right before it stopped last. This option
+is incompatible with the 'force_stop' option.
+
+
+Restore state on start/stop
+
+
+
+
+
+
+When the VM is used in Copy-On-Write mode, this is the backing file to use (with its full path).
+The VMs image will be created based on this backing file.
+This backing file will never be changed during the life of the VM.
+
+If the VM is wanted to work with Copy-On-Write mode, this is the backing file to use (with its full path)
+
+
+
+
+
+If set to true and backingfile is defined, the start of the VM will systematically create a new qcow2 based on
+the backing file, therefore the VM will always be stateless. If set to false, the start of the VM will use the
+COW (<vmname>.qcow2) file if it exists, otherwise the first start will create a new qcow2 based on the backing
+file given as backingfile.
+
+If set to true, the (<vmname>.qcow2) file will be re-created at each start, based on the backing file (if defined)
+
+
+
+
+
+List of directories for the virt-copy-in before booting the VM. Used only in stateless mode.
+
+List of directories for the virt-copy-in before booting the VM stateless mode.
+
+
+
+
+
+virsh shutdown method to use. Please verify that it is supported by your virsh toolset with 'virsh help shutdown'
+When this parameter is set --mode shutdown_mode is passed as an additional argument to the 'virsh shutdown' command.
+One can use this option in case default acpi method does not work. Verify that this mode is supported
+by your VM. By default --mode is not passed.
+
+
+Instruct virsh to use specific shutdown mode
+
+
+
+
+
+
+Start the virtual storage pools and networks used by the virtual machine before starting it or before live migrating it.
+
+
+Ensure the needed virtual storage pools and networks are started
+
+
+
+
+
+
+Work on Seapath cluster.
+
+Enable seapath cluster support
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+EOF
+}
+
+set_util_attr() {
+ local attr=$1 val=$2
+ local cval outp
+
+ cval=$(crm_resource -Q -r $OCF_RESOURCE_INSTANCE -z -g $attr 2>/dev/null)
+ if [ $? -ne 0 ] && [ -z "$cval" ]; then
+ crm_resource -Q -r $OCF_RESOURCE_INSTANCE -z -g $attr 2>&1 | grep -e "not connected" > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ ocf_log debug "Unable to set utilization attribute, cib is not available"
+ return
+ fi
+ fi
+
+ if [ "$cval" != "$val" ]; then
+ outp=$(crm_resource -r $OCF_RESOURCE_INSTANCE -z -p $attr -v $val 2>&1) ||
+ ocf_log warn "crm_resource failed to set utilization attribute $attr: $outp"
+ fi
+}
+
+unset_util_attr() {
+ local attr=$1
+ local cval outp
+
+ outp=$(crm_resource --resource=$OCF_RESOURCE_INSTANCE --utilization --delete-parameter=$attr 2>&1) ||
+ ocf_log warn "crm_resource failed to unset utilization attribute $attr: $outp"
+}
+
+update_utilization() {
+ local dom_cpu dom_mem
+
+ if ocf_is_true "$OCF_RESKEY_autoset_utilization_cpu"; then
+ dom_cpu=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/CPU\(s\)/{print $2}')
+ test -n "$dom_cpu" && set_util_attr cpu $dom_cpu
+ elif ocf_is_true "$OCF_RESKEY_unset_utilization_cpu"; then
+ unset_util_attr cpu
+ fi
+
+ if ocf_is_true "$OCF_RESKEY_autoset_utilization_host_memory"; then
+ dom_mem=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/Max memory/{printf("%d", $3/1024)}')
+ test -n "$dom_mem" && set_util_attr host_memory "$dom_mem"
+ elif ocf_is_true "$OCF_RESKEY_unset_utilization_host_memory"; then
+ unset_util_attr host_memory
+ fi
+
+ if ocf_is_true "$OCF_RESKEY_autoset_utilization_hv_memory"; then
+ dom_mem=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/Max memory/{printf("%d", $3/1024)}')
+ test -n "$dom_mem" && set_util_attr hv_memory "$dom_mem"
+ elif ocf_is_true "$OCF_RESKEY_unset_utilization_hv_memory"; then
+ unset_util_attr hv_memory
+ fi
+}
+
+get_emulator()
+{
+ local emulator=""
+
+ emulator=$(virsh $VIRSH_OPTIONS dumpxml $DOMAIN_NAME 2>/dev/null | sed -n -e 's/^.*\(.*\)<\/emulator>.*$/\1/p')
+ if [ -z "$emulator" ] && [ -e "$EMULATOR_STATE" ]; then
+ emulator=$(cat $EMULATOR_STATE)
+ fi
+ if [ -z "$emulator" ]; then
+ emulator=$(cat ${OCF_RESKEY_config} | sed -n -e 's/^.*\(.*\)<\/emulator>.*$/\1/p')
+ fi
+
+ if [ -n "$emulator" ]; then
+ basename $emulator
+ fi
+}
+
+update_emulator_cache()
+{
+ local emulator
+
+ emulator=$(get_emulator)
+ if [ -n "$emulator" ]; then
+ echo $emulator > $EMULATOR_STATE
+ fi
+}
+
+# attempt to check domain status outside of libvirt using the emulator process
+pid_status()
+{
+ local rc=$OCF_ERR_GENERIC
+ local emulator=$(get_emulator)
+ # An emulator is not required, so only report message in debug mode
+ local loglevel="debug"
+
+ if ocf_is_probe; then
+ loglevel="notice"
+ fi
+
+ case "$emulator" in
+ qemu-kvm|qemu-dm|qemu-system-*)
+ rc=$OCF_NOT_RUNNING
+ ps awx | grep -E "[q]emu-(kvm|dm|system).*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ rc=$OCF_SUCCESS
+ fi
+ ;;
+ libvirt_lxc)
+ rc=$OCF_NOT_RUNNING
+ ps awx | grep -E "[l]ibvirt_lxc.*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ rc=$OCF_SUCCESS
+ fi
+ ;;
+ # This can be expanded to check for additional emulators
+ *)
+ # We may be running xen with PV domains, they don't
+ # have an emulator set. try xl list or xen-lists
+ if have_binary xl; then
+ rc=$OCF_NOT_RUNNING
+ xl list $DOMAIN_NAME >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ rc=$OCF_SUCCESS
+ fi
+ elif have_binary xen-list; then
+ rc=$OCF_NOT_RUNNING
+ xen-list $DOMAIN_NAME 2>/dev/null | grep -qs "State.*[-r][-b][-p]--" 2>/dev/null
+ if [ $? -eq 0 ]; then
+ rc=$OCF_SUCCESS
+ fi
+ else
+ ocf_log $loglevel "Unable to determine emulator for $DOMAIN_NAME"
+ fi
+ ;;
+ esac
+
+ if [ $rc -eq $OCF_SUCCESS ]; then
+ ocf_log debug "Virtual domain $DOMAIN_NAME is currently running."
+ elif [ $rc -eq $OCF_NOT_RUNNING ]; then
+ ocf_log debug "Virtual domain $DOMAIN_NAME is currently not running."
+ fi
+
+ return $rc
+}
+
+VirtualDomain_status() {
+ local try=0
+ rc=$OCF_ERR_GENERIC
+ status="no state"
+ while [ "$status" = "no state" ]; do
+ try=$(($try + 1 ))
+ status=$(LANG=C virsh $VIRSH_OPTIONS domstate $DOMAIN_NAME 2>&1 | tr 'A-Z' 'a-z')
+ case "$status" in
+ *"error:"*"domain not found"|*"error:"*"failed to get domain"*|"shut off")
+ # shut off: domain is defined, but not started, will not happen if
+ # domain is created but not defined
+ # "Domain not found" or "failed to get domain": domain is not defined
+ # and thus not started
+ ocf_log debug "Virtual domain $DOMAIN_NAME is not running: $(echo $status | sed s/error://g)"
+ rc=$OCF_NOT_RUNNING
+ ;;
+ running|paused|idle|blocked|"in shutdown")
+ # running: domain is currently actively consuming cycles
+ # paused: domain is paused (suspended)
+ # idle: domain is running but idle
+ # blocked: synonym for idle used by legacy Xen versions
+ # in shutdown: the domain is in process of shutting down, but has not completely shutdown or crashed.
+ ocf_log debug "Virtual domain $DOMAIN_NAME is currently $status."
+ rc=$OCF_SUCCESS
+ ;;
+ ""|*"failed to "*"connect to the hypervisor"*|"no state")
+ # Empty string may be returned when virsh does not
+ # receive a reply from libvirtd.
+ # "no state" may occur when the domain is currently
+ # being migrated (on the migration target only), or
+ # whenever virsh can't reliably obtain the domain
+ # state.
+ status="no state"
+ if [ "$__OCF_ACTION" = "stop" ] && [ $try -ge 3 ]; then
+ # During the stop operation, we want to bail out
+ # quickly, so as to be able to force-stop (destroy)
+ # the domain if necessary.
+ ocf_exit_reason "Virtual domain $DOMAIN_NAME has no state during stop operation, bailing out."
+ return $OCF_ERR_GENERIC;
+ elif [ "$__OCF_ACTION" = "monitor" ]; then
+ pid_status
+ rc=$?
+ if [ $rc -ne $OCF_ERR_GENERIC ]; then
+ # we've successfully determined the domains status outside of libvirt
+ return $rc
+ fi
+
+ else
+ # During all other actions, we just wait and try
+ # again, relying on the CRM/LRM to time us out if
+ # this takes too long.
+ ocf_log info "Virtual domain $DOMAIN_NAME currently has no state, retrying."
+ fi
+ sleep 1
+ ;;
+ *)
+ # any other output is unexpected.
+ ocf_log error "Virtual domain $DOMAIN_NAME has unknown status \"$status\"!"
+ sleep 1
+ ;;
+ esac
+ done
+ return $rc
+}
+
+# virsh undefine removes configuration files if they are in
+# directories which are managed by libvirt. such directories
+# include also subdirectories of /etc (for instance
+# /etc/libvirt/*) which may be surprising. VirtualDomain didn't
+# include the undefine call before, hence this wasn't an issue
+# before.
+#
+# There seems to be no way to find out which directories are
+# managed by libvirt.
+#
+verify_undefined() {
+ local tmpf
+ if virsh --connect=${OCF_RESKEY_hypervisor} list --all --name 2>/dev/null | grep -wqs "$DOMAIN_NAME"
+ then
+ tmpf=$(mktemp -t vmcfgsave.XXXXXX)
+ if [ ! -r "$tmpf" ]; then
+ ocf_log warn "unable to create temp file, disk full?"
+ # we must undefine the domain
+ virsh $VIRSH_OPTIONS undefine --nvram $DOMAIN_NAME > /dev/null 2>&1
+ else
+ cp -p $OCF_RESKEY_config $tmpf
+ virsh $VIRSH_OPTIONS undefine --nvram $DOMAIN_NAME > /dev/null 2>&1
+ [ -f $OCF_RESKEY_config ] || cp -f $tmpf $OCF_RESKEY_config
+ rm -f $tmpf
+ fi
+ fi
+}
+
+start_resources() {
+ local virsh_opts="--connect=$1 --quiet"
+ local pool_state net_state
+ for pool in `sed -n "s/^.*pool=['\"]\([^'\"]\+\)['\"].*\$/\1/gp" ${OCF_RESKEY_config} | sort | uniq`; do
+ pool_state=`LANG=C virsh ${virsh_opts} pool-info ${pool} | sed -n 's/^State: \+\(.*\)$/\1/gp'`
+ if [ "$pool_state" != "running" ]; then
+ virsh ${virsh_opts} pool-start $pool
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to start required virtual storage pool ${pool}."
+ return $OCF_ERR_GENERIC
+ fi
+ else
+ virsh ${virsh_opts} pool-refresh $pool
+ fi
+ done
+
+ for net in `sed -n "s/^.*network=['\"]\([^'\"]\+\)['\"].*\$/\1/gp" ${OCF_RESKEY_config} | sort | uniq`; do
+ net_state=`LANG=C virsh ${virsh_opts} net-info ${net} | sed -n 's/^Active: \+\(.*\)$/\1/gp'`
+ if [ "$net_state" != "yes" ]; then
+ virsh ${virsh_opts} net-start $net
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to start required virtual network ${net}."
+ return $OCF_ERR_GENERIC
+ fi
+ fi
+ done
+
+ return $OCF_SUCCESS
+}
+
+restore_config() {
+ if ocf_is_true $OCF_RESKEY_seapath ; then
+ local disk_name=system_$DOMAIN_NAME
+ if rbd info $disk_name > /dev/null 2>&1 ; then
+ rbd image-meta get $disk_name xml > $OCF_RESKEY_config
+ fi
+ fi
+}
+
+VirtualDomain_start() {
+ local snapshotimage
+
+ if VirtualDomain_status; then
+ ocf_log info "Virtual domain $DOMAIN_NAME already running."
+ return $OCF_SUCCESS
+ fi
+
+ # systemd drop-in to stop domain before libvirtd terminates services
+ # during shutdown/reboot
+ if systemd_is_running ; then
+ systemd_drop_in "99-VirtualDomain-libvirt" "After" "libvirtd.service"
+ systemd_drop_in "99-VirtualDomain-machines" "Wants" "virt-guest-shutdown.target"
+ systemctl start virt-guest-shutdown.target
+ fi
+
+ snapshotimage="$OCF_RESKEY_snapshot/${DOMAIN_NAME}.state"
+ if [ -n "$OCF_RESKEY_snapshot" -a -f "$snapshotimage" ]; then
+ virsh restore $snapshotimage
+ if [ $? -eq 0 ]; then
+ rm -f $snapshotimage
+ return $OCF_SUCCESS
+ fi
+ ocf_exit_reason "Failed to restore ${DOMAIN_NAME} from state file in ${OCF_RESKEY_snapshot} directory."
+ return $OCF_ERR_GENERIC
+ fi
+
+ restore_config
+ # Make sure domain is undefined before creating.
+ # The 'create' command guarantees that the domain will be
+ # undefined on shutdown, but requires the domain to be undefined.
+ # if a user defines the domain
+ # outside of this agent, we have to ensure that the domain
+ # is restored to an 'undefined' state before creating.
+ verify_undefined
+
+ if ocf_is_true "${OCF_RESKEY_start_resources}"; then
+ start_resources ${OCF_RESKEY_hypervisor}
+ rc=$?
+ if [ $rc -eq $OCF_ERR_GENERIC ]; then
+ return $rc
+ fi
+ fi
+
+ if [ -z "${OCF_RESKEY_backingfile}" ]; then
+ virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config}
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}."
+ return $OCF_ERR_GENERIC
+ fi
+ else
+ if ocf_is_true "${OCF_RESKEY_stateless}" || [ ! -s "${OCF_RESKEY_config%%.*}.qcow2" ]; then
+ # Create the Stateless image
+ dirconfig=`dirname ${OCF_RESKEY_config}`
+ qemu-img create -f qcow2 -b ${OCF_RESKEY_backingfile} ${OCF_RESKEY_config%%.*}.qcow2
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed qemu-img create ${DOMAIN_NAME} with backing file ${OCF_RESKEY_backingfile}."
+ return $OCF_ERR_GENERIC
+ fi
+
+ virsh define ${OCF_RESKEY_config}
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}."
+ return $OCF_ERR_GENERIC
+ fi
+
+ if [ -n "${OCF_RESKEY_copyindirs}" ]; then
+ # Inject copyindirs directories and files
+ virt-copy-in -d ${DOMAIN_NAME} ${OCF_RESKEY_copyindirs} /
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed on virt-copy-in command ${DOMAIN_NAME}."
+ return $OCF_ERR_GENERIC
+ fi
+ fi
+ else
+ virsh define ${OCF_RESKEY_config}
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}."
+ return $OCF_ERR_GENERIC
+ fi
+ fi
+
+ virsh $VIRSH_OPTIONS start ${DOMAIN_NAME}
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}."
+ return $OCF_ERR_GENERIC
+ fi
+ fi
+
+ while ! VirtualDomain_monitor; do
+ sleep 1
+ done
+
+ return $OCF_SUCCESS
+}
+
+force_stop()
+{
+ local out ex translate
+ local status=0
+
+ ocf_log info "Issuing forced shutdown (destroy) request for domain ${DOMAIN_NAME}."
+ out=$(LANG=C virsh $VIRSH_OPTIONS destroy ${DOMAIN_NAME} 2>&1)
+ ex=$?
+ translate=$(echo $out|tr 'A-Z' 'a-z')
+ echo >&2 "$translate"
+ case $ex$translate in
+ *"error:"*"domain is not running"*|*"error:"*"domain not found"*|\
+ *"error:"*"failed to get domain"*)
+ : ;; # unexpected path to the intended outcome, all is well
+ [!0]*)
+ ocf_exit_reason "forced stop failed"
+ return $OCF_ERR_GENERIC ;;
+ 0*)
+ while [ $status != $OCF_NOT_RUNNING ]; do
+ VirtualDomain_status
+ status=$?
+ done ;;
+ esac
+ return $OCF_SUCCESS
+}
+
+sync_config(){
+ ocf_log info "Syncing $DOMAIN_NAME config file with csync2 -x ${OCF_RESKEY_config}"
+ if ! csync2 -x ${OCF_RESKEY_config}; then
+ ocf_log warn "Syncing ${OCF_RESKEY_config} failed.";
+ fi
+}
+
+save_config(){
+ CFGTMP=$(mktemp -t vmcfgsave.XXX)
+ virsh $VIRSH_OPTIONS dumpxml --inactive --security-info ${DOMAIN_NAME} > ${CFGTMP}
+ if [ -s ${CFGTMP} ]; then
+ if ! cmp -s ${CFGTMP} ${OCF_RESKEY_config}; then
+ if virt-xml-validate ${CFGTMP} domain 2>/dev/null ; then
+ ocf_log info "Saving domain $DOMAIN_NAME to ${OCF_RESKEY_config}. Please make sure it's present on all nodes or sync_config_on_stop is on."
+ if cat ${CFGTMP} > ${OCF_RESKEY_config} ; then
+ ocf_log info "Saved $DOMAIN_NAME domain's configuration to ${OCF_RESKEY_config}."
+ if ocf_is_true "$OCF_RESKEY_sync_config_on_stop"; then
+ sync_config
+ fi
+ else
+ ocf_log warn "Moving ${CFGTMP} to ${OCF_RESKEY_config} failed."
+ fi
+ else
+ ocf_log warn "Domain $DOMAIN_NAME config failed to validate after dump. Skipping config update."
+ fi
+ fi
+ else
+ ocf_log warn "Domain $DOMAIN_NAME config has 0 size. Skipping config update."
+ fi
+ rm -f ${CFGTMP}
+}
+
+VirtualDomain_stop() {
+ local i
+ local status
+ local shutdown_timeout
+ local needshutdown=1
+
+ VirtualDomain_status
+ status=$?
+
+ case $status in
+ $OCF_SUCCESS)
+ if ocf_is_true $OCF_RESKEY_force_stop; then
+ # if force stop, don't bother attempting graceful shutdown.
+ force_stop
+ return $?
+ fi
+
+ ocf_log info "Issuing graceful shutdown request for domain ${DOMAIN_NAME}."
+
+ if [ -n "$OCF_RESKEY_snapshot" ]; then
+ virsh save $DOMAIN_NAME "$OCF_RESKEY_snapshot/${DOMAIN_NAME}.state"
+ if [ $? -eq 0 ]; then
+ needshutdown=0
+ else
+ ocf_log error "Failed to save snapshot state of ${DOMAIN_NAME} on stop"
+ fi
+ fi
+
+ # save config if needed
+ if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then
+ save_config
+ fi
+
+ # issue the shutdown if save state didn't shutdown for us
+ if [ $needshutdown -eq 1 ]; then
+ # Issue a graceful shutdown request
+ if [ -n "${OCF_RESKEY_CRM_shutdown_mode}" ]; then
+ shutdown_opts="--mode ${OCF_RESKEY_CRM_shutdown_mode}"
+ fi
+ ocf_log info "virsh $VIRSH_OPTIONS shutdown ${DOMAIN_NAME} $shutdown_opts"
+ timeout 1s virsh $VIRSH_OPTIONS shutdown ${DOMAIN_NAME} $shutdown_opts
+ virsh_timeout_status=$?
+ if [ $virsh_timeout_status -eq 124 ] #timeout, there's something wrong with the guest
+ then
+ # Something went wrong, break from switch case, and resort to forced stop (destroy).
+ ocf_log info "${DOMAIN_NAME} shutdown problem, force_stop"
+ force=1
+ else
+ force=0
+ fi
+ fi
+
+ # The "shutdown_timeout" we use here is the operation
+ # timeout specified in the CIB, minus 17 seconds (because it has been seen cases where the destroy operation takes more than 15s)
+ shutdown_timeout=$(( $NOW + ($OCF_RESKEY_CRM_meta_timeout/1000) -17 ))
+ # Loop on status until we reach $shutdown_timeout
+ ocf_log info "${DOMAIN_NAME} shutdown_timeout=$shutdown_timeout, NOW=$NOW"
+ while [ $NOW -lt $shutdown_timeout -a $force -ne 1 ]; do
+ VirtualDomain_status
+ status=$?
+ ocf_log info "${DOMAIN_NAME} VirtualDomain_status $status"
+ case $status in
+ $OCF_NOT_RUNNING)
+ # This was a graceful shutdown.
+ ocf_log info "${DOMAIN_NAME} This was a graceful shutdown."
+ return $OCF_SUCCESS
+ ;;
+ $OCF_SUCCESS)
+ # Domain is still running, keep
+ # waiting (until shutdown_timeout
+ # expires)
+ ocf_log info "${DOMAIN_NAME} sleep 1."
+ sleep 1
+ ;;
+ *)
+ # Something went wrong. Bail out and
+ # resort to forced stop (destroy).
+ break;
+ esac
+ NOW=$(date +%s)
+ done
+ ;;
+ $OCF_NOT_RUNNING)
+ ocf_log info "Domain $DOMAIN_NAME already stopped."
+ return $OCF_SUCCESS
+ esac
+
+ # OK. Now if the above graceful shutdown hasn't worked, kill
+ # off the domain with destroy. If that too does not work,
+ # have the LRM time us out.
+ ocf_log info "${DOMAIN_NAME} force_stop"
+ force_stop
+}
+
+mk_migrateuri() {
+ local target_node
+ local migrate_target
+ local hypervisor
+
+ target_node="$OCF_RESKEY_CRM_meta_migrate_target"
+
+ # A typical migration URI via a special migration network looks
+ # like "tcp://bar-mig:49152". The port would be randomly chosen
+ # by libvirt from the range 49152-49215 if omitted, at least since
+ # version 0.7.4 ...
+ if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then
+ hypervisor="${OCF_RESKEY_hypervisor%%[+:]*}"
+ # Hostname might be a FQDN
+ migrate_target=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},")
+ case $hypervisor in
+ qemu)
+ # For quiet ancient libvirt versions a migration port is needed
+ # and the URI must not contain the "//". Newer versions can handle
+ # the "bad" URI.
+ echo "tcp:${migrate_target}:${OCF_RESKEY_migrateport}"
+ ;;
+ xen)
+ echo "${migrate_target}"
+ ;;
+ *)
+ ocf_log warn "$DOMAIN_NAME: Migration via dedicated network currently not supported for ${hypervisor}."
+ ;;
+ esac
+ fi
+}
+
+VirtualDomain_migrate_to() {
+ local rc
+ local target_node
+ local remoteuri
+ local transport_suffix
+ local migrateuri
+ local migrate_opts
+ local migrate_pid
+
+ target_node="$OCF_RESKEY_CRM_meta_migrate_target"
+
+ if VirtualDomain_status; then
+ # Find out the remote hypervisor to connect to. That is, turn
+ # something like "qemu://foo:9999/system" into
+ # "qemu+tcp://bar:9999/system"
+
+ if [ -n "${OCF_RESKEY_remoteuri}" ]; then
+ remoteuri=`echo "${OCF_RESKEY_remoteuri}" |
+ sed "s/%n/$target_node/g"`
+ else
+ if [ -n "${OCF_RESKEY_migration_transport}" ]; then
+ transport_suffix="+${OCF_RESKEY_migration_transport}"
+ fi
+
+ # append user defined suffix if virsh target should differ from cluster node name
+ if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then
+ # Hostname might be a FQDN
+ target_node=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},")
+ fi
+
+ # a remote user has been defined to connect to target_node
+ if echo ${OCF_RESKEY_migration_user} | grep -q "^[a-z][-a-z0-9]*$" ; then
+ target_node="${OCF_RESKEY_migration_user}@${target_node}"
+ fi
+
+ # Scared of that sed expression? So am I. :-)
+ remoteuri=$(echo ${OCF_RESKEY_hypervisor} | sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,")
+ fi
+
+ # User defined migrateuri or do we make one?
+ migrate_opts="$OCF_RESKEY_migrate_options"
+
+ # migration_uri is directly set
+ if [ -n "${OCF_RESKEY_migrateuri}" ]; then
+ migrateuri=`echo "${OCF_RESKEY_migrateuri}" |
+ sed "s/%n/$target_node/g"`
+
+ # extract migrationuri from options
+ elif echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then
+ migrateuri=`echo "$migrate_opts" |
+ sed "s/.*--migrateuri=\([^ ]*\).*/\1/;s/%n/$target_node/g"`
+
+ # auto generate
+ else
+ migrateuri=`mk_migrateuri`
+ fi
+
+ # remove --migrateuri from migration_opts
+ migrate_opts=`echo "$migrate_opts" |
+ sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"`
+
+
+ # save config if needed
+ if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then
+ save_config
+ fi
+
+ if ocf_is_true "${OCF_RESKEY_start_resources}"; then
+ start_resources $remoteuri
+ rc=$?
+ if [ $rc -eq $OCF_ERR_GENERIC ]; then
+ return $rc
+ fi
+ fi
+
+ # Live migration speed limit
+ if [ ${OCF_RESKEY_migration_speed} -ne 0 ]; then
+ ocf_log info "$DOMAIN_NAME: Setting live migration speed limit for $DOMAIN_NAME (using: virsh ${VIRSH_OPTIONS} migrate-setspeed $DOMAIN_NAME ${OCF_RESKEY_migration_speed})."
+ virsh ${VIRSH_OPTIONS} migrate-setspeed $DOMAIN_NAME ${OCF_RESKEY_migration_speed}
+ fi
+
+ # OK, we know where to connect to. Now do the actual migration.
+ ocf_log info "$DOMAIN_NAME: Starting live migration to ${target_node} (using: virsh ${VIRSH_OPTIONS} migrate --live $migrate_opts $DOMAIN_NAME $remoteuri $migrateuri)."
+ virsh ${VIRSH_OPTIONS} migrate --live $migrate_opts $DOMAIN_NAME $remoteuri $migrateuri &
+
+ migrate_pid=${!}
+
+ # Live migration downtime interval
+ # Note: You can set downtime only while live migration is in progress
+ if [ ${OCF_RESKEY_migration_downtime} -ne 0 ]; then
+ sleep 2
+ ocf_log info "$DOMAIN_NAME: Setting live migration downtime for $DOMAIN_NAME (using: virsh ${VIRSH_OPTIONS} migrate-setmaxdowntime $DOMAIN_NAME ${OCF_RESKEY_migration_downtime})."
+ virsh ${VIRSH_OPTIONS} migrate-setmaxdowntime $DOMAIN_NAME ${OCF_RESKEY_migration_downtime}
+ fi
+
+ wait ${migrate_pid}
+
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ ocf_exit_reason "$DOMAIN_NAME: live migration to ${target_node} failed: $rc"
+ return $OCF_ERR_GENERIC
+ else
+ ocf_log info "$DOMAIN_NAME: live migration to ${target_node} succeeded."
+ return $OCF_SUCCESS
+ fi
+ else
+ ocf_exit_reason "$DOMAIN_NAME: migrate_to: Not active locally!"
+ return $OCF_ERR_GENERIC
+ fi
+}
+
+VirtualDomain_migrate_from() {
+ # systemd drop-in to stop domain before libvirtd terminates services
+ # during shutdown/reboot
+ if systemd_is_running ; then
+ systemd_drop_in "99-VirtualDomain-libvirt" "After" "libvirtd.service"
+ systemd_drop_in "99-VirtualDomain-machines" "Wants" "virt-guest-shutdown.target"
+ systemctl start virt-guest-shutdown.target
+ fi
+
+ while ! VirtualDomain_monitor; do
+ sleep 1
+ done
+ ocf_log info "$DOMAIN_NAME: live migration from ${OCF_RESKEY_CRM_meta_migrate_source} succeeded."
+ # save config if needed
+ if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then
+ save_config
+ fi
+ return $OCF_SUCCESS
+}
+
+VirtualDomain_monitor() {
+ # First, check the domain status. If that returns anything other
+ # than $OCF_SUCCESS, something is definitely wrong.
+ VirtualDomain_status
+ rc=$?
+ if [ ${rc} -eq ${OCF_SUCCESS} ]; then
+ # OK, the generic status check turned out fine. Now, if we
+ # have monitor scripts defined, run them one after another.
+ for script in ${OCF_RESKEY_monitor_scripts}; do
+ script_output="$($script 2>&1)"
+ script_rc=$?
+ if [ ${script_rc} -ne ${OCF_SUCCESS} ]; then
+ # A monitor script returned a non-success exit
+ # code. Stop iterating over the list of scripts, log a
+ # warning message, and propagate $OCF_ERR_GENERIC.
+ ocf_exit_reason "Monitor command \"${script}\" for domain ${DOMAIN_NAME} returned ${script_rc} with output: ${script_output}"
+ rc=$OCF_ERR_GENERIC
+ break
+ else
+ ocf_log debug "Monitor command \"${script}\" for domain ${DOMAIN_NAME} completed successfully with output: ${script_output}"
+ fi
+ done
+ fi
+
+ update_emulator_cache
+ update_utilization
+ # Save configuration on monitor as well, so we will have a better chance of
+ # having fresh and up to date config files on all nodes.
+ if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then
+ save_config
+ fi
+
+ return ${rc}
+}
+
+VirtualDomain_validate_all() {
+ if ocf_is_true $OCF_RESKEY_force_stop && [ -n "$OCF_RESKEY_snapshot" ]; then
+ ocf_exit_reason "The 'force_stop' and 'snapshot' options can not be used together."
+ return $OCF_ERR_CONFIGURED
+ fi
+
+ if [ ! -r $OCF_RESKEY_config ] ; then
+ restore_config
+ fi
+
+ # check if we can read the config file (otherwise we're unable to
+ # deduce $DOMAIN_NAME from it, see below)
+ if [ ! -r $OCF_RESKEY_config ]; then
+ if ocf_is_probe; then
+ ocf_log info "Configuration file $OCF_RESKEY_config not readable during probe."
+ elif [ "$__OCF_ACTION" = "stop" ]; then
+ ocf_log info "Configuration file $OCF_RESKEY_config not readable, resource considered stopped."
+ else
+ ocf_exit_reason "Configuration file $OCF_RESKEY_config does not exist or not readable."
+ fi
+ return $OCF_ERR_INSTALLED
+ fi
+
+ if [ -z $DOMAIN_NAME ]; then
+ ocf_exit_reason "Unable to determine domain name."
+ return $OCF_ERR_INSTALLED
+ fi
+
+ # Check if csync2 is available when config tells us we might need it.
+ if ocf_is_true $OCF_RESKEY_sync_config_on_stop; then
+ check_binary csync2
+ fi
+
+ # Check if migration_speed is a decimal value
+ if ! ocf_is_decimal ${OCF_RESKEY_migration_speed}; then
+ ocf_exit_reason "migration_speed has to be a decimal value"
+ return $OCF_ERR_CONFIGURED
+ fi
+
+ # Check if migration_downtime is a decimal value
+ if ! ocf_is_decimal ${OCF_RESKEY_migration_downtime}; then
+ ocf_exit_reason "migration_downtime has to be a decimal value"
+ return $OCF_ERR_CONFIGURED
+ fi
+
+ if ocf_is_true "${OCF_RESKEY_stateless}" && [ -z "${OCF_RESKEY_backingfile}" ]; then
+ ocf_exit_reason "Stateless functionality can't be achieved without a backing file."
+ return $OCF_ERR_CONFIGURED
+ fi
+}
+
+VirtualDomain_getconfig() {
+ # Grab the virsh uri default, but only if hypervisor isn't set
+ : ${OCF_RESKEY_hypervisor=$(virsh --quiet uri 2>/dev/null)}
+
+ # Set options to be passed to virsh:
+ VIRSH_OPTIONS="--connect=${OCF_RESKEY_hypervisor} --quiet"
+ if ocf_is_true $OCF_RESKEY_seapath ; then
+ # Retrieve the domain name from xml filename
+ DOMAIN_NAME=`basename ${OCF_RESKEY_config} | cut -d '.' -f 1`
+ else
+ # Retrieve the domain name from the xml file.
+        DOMAIN_NAME=`egrep '[[:space:]]*<name>.*</name>[[:space:]]*$' ${OCF_RESKEY_config} 2>/dev/null | sed -e 's/[[:space:]]*<name>\(.*\)<\/name>[[:space:]]*$/\1/'`
+ fi
+
+ EMULATOR_STATE="${HA_RSCTMP}/VirtualDomain-${DOMAIN_NAME}-emu.state"
+}
+
+OCF_REQUIRED_PARAMS="config"
+OCF_REQUIRED_BINARIES="virsh sed"
+ocf_rarun $*
diff --git a/roles/centos_physical_machine/files/pacemaker_ra/ntpstatus b/roles/centos_physical_machine/files/pacemaker_ra/ntpstatus
new file mode 100755
index 000000000..5ac67370b
--- /dev/null
+++ b/roles/centos_physical_machine/files/pacemaker_ra/ntpstatus
@@ -0,0 +1,320 @@
+#!/bin/sh
+#
+# ocf:seapath:ntpstatus resource agent
+#
+# Original copyright 2004 SUSE LINUX AG, Lars Marowsky-Bre
+# Later changes copyright 2008-2019 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# (GPLv2) WITHOUT ANY WARRANTY.
+#
+# crm config example:
+#primitive ntpstatus_test ocf:seapath:ntpstatus \
+# op monitor timeout=10 interval=10
+#clone cl_ntpstatus_test ntpstatus_test \
+# meta target-role=Started
+#location ntp_test_debian debian \
+# rule ntpstatus: defined ntpstatus
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"}
+. "${OCF_FUNCTIONS}"
+: ${__OCF_ACTION:="$1"}
+
+#######################################################################
+
+meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ntpstatus" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">
+Checks the status of the connectivity to a ntp server
+</longdesc>
+<shortdesc lang="en">Checks the status of the connectivity to a ntp server</shortdesc>
+
+<parameters>
+
+<parameter name="state" unique-group="state">
+<longdesc lang="en">
+Location to store the resource state in.
+</longdesc>
+<shortdesc lang="en">State file</shortdesc>
+<content type="string" default="${HA_VARRUN%%/}/ntpstatus-${OCF_RESOURCE_INSTANCE}.state" />
+</parameter>
+
+<parameter name="ntpstatus">
+<longdesc lang="en">
+ntpstatus
+</longdesc>
+<shortdesc lang="en">ntpstatus</shortdesc>
+<content type="string" default="ntpstatus" />
+</parameter>
+
+<parameter name="op_sleep">
+<longdesc lang="en">
+Number of seconds to sleep during operations. This can be used to test how
+the cluster reacts to operation timeouts.
+</longdesc>
+<shortdesc lang="en">Operation sleep duration in seconds.</shortdesc>
+<content type="string" default="0" />
+</parameter>
+
+<parameter name="fail_start_on">
+<longdesc lang="en">
+Start actions will return failure if running on the host specified here, but
+the resource will start successfully anyway (future monitor calls will find it
+running). This can be used to test on-fail=ignore.
+</longdesc>
+<shortdesc lang="en">Report bogus start failure on specified host</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="envfile">
+<longdesc lang="en">
+If this is set, the environment will be dumped to this file for every call.
+</longdesc>
+<shortdesc lang="en">Environment dump file</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="multiplier">
+<longdesc lang="en">
+The number by which to multiply the connectivity (0 or 1) by
+</longdesc>
+<shortdesc lang="en">Value multiplier</shortdesc>
+<content type="string" default="1000" />
+</parameter>
+
+<parameter name="host_ip">
+<longdesc lang="en">
+ip address to check the connectivity to
+</longdesc>
+<shortdesc lang="en">Host IP</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start"        timeout="20s" />
+<action name="stop"         timeout="20s" />
+<action name="monitor"      timeout="20s" interval="10s" depth="0" />
+<action name="reload"       timeout="20s" />
+<action name="migrate_to"   timeout="20s" />
+<action name="migrate_from" timeout="20s" />
+<action name="validate-all" timeout="20s" />
+<action name="meta-data"    timeout="5s" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+# don't exit on TERM, to test that pacemaker-execd makes sure that we do exit
+trap sigterm_handler TERM
+sigterm_handler() {
+ ocf_log info "They use TERM to bring us down. No such luck."
+
+ # Since we're likely going to get KILLed, clean up any monitor
+ # serialization in progress, so the next probe doesn't return an error.
+ rm -f "${VERIFY_SERIALIZED_FILE}"
+ return
+}
+
+ntpstatus_usage() {
+	cat <<END
+usage: $0 {start|stop|monitor|reload|migrate_to|migrate_from|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+dump_env() {
+	if [ "${OCF_RESKEY_envfile}" != "" ]; then
+		echo "### ${__OCF_ACTION} @ $(date) ### $(env | sort) ###" >> "${OCF_RESKEY_envfile}"
+	fi
+}
+
+ntpstatus_update() {
+ # get the 5th column from chrony sources output (reach), convert from octal to binary and get the last digit (last contact with server)
+ # 1 --> last contact fine
+ # 0 --> last contact problem
+ status=`echo $(/usr/bin/chronyc -n sources | grep -E "$OCF_RESKEY_host_ip" | awk '{print $5}') | python3 -c "print(\"{0:b}\".format(int(input() or \"0\",8))[-1])"`
+ case "${status#[+]}" in
+ ''|*[!0-9]*)
+ status=0
+ ;;
+ esac
+ status=$(expr $status \* $OCF_RESKEY_multiplier)
+ if [ "$__OCF_ACTION" = "start" ] ; then
+ attrd_updater -n "$OCF_RESKEY_ntpstatus" -B "$status" -d "$OCF_RESKEY_dampen" $attrd_options
+ else
+ attrd_updater -n "$OCF_RESKEY_ntpstatus" -v "$status" -d "$OCF_RESKEY_dampen" $attrd_options
+ fi
+ rc=$?
+ case $rc in
+ 0) #ocf_log info "Updated $OCF_RESKEY_ntpstatus = $status"
+ ;;
+ *) ocf_log warn "Could not update $OCF_RESKEY_ntpstatus = $status: rc=$rc";;
+ esac
+ if [ $rc -ne 0 ]; then
+ return $rc
+ fi
+}
+
+ntpstatus_start() {
+ ntpstatus_monitor
+
+ DS_RETVAL=$?
+ if [ $DS_RETVAL -eq $OCF_SUCCESS ]; then
+ if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then
+ DS_RETVAL=$OCF_ERR_GENERIC
+ fi
+ return $DS_RETVAL
+ fi
+
+ touch "${OCF_RESKEY_state}"
+ DS_RETVAL=$?
+ if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then
+ DS_RETVAL=$OCF_ERR_GENERIC
+ fi
+ ntpstatus_update
+ return $DS_RETVAL
+}
+
+ntpstatus_stop() {
+ ntpstatus_monitor --force
+ attrd_updater -D -n "$OCF_RESKEY_ntpstatus" -d "$OCF_RESKEY_dampen" $attrd_options
+ if [ $? -eq $OCF_SUCCESS ]; then
+ rm "${OCF_RESKEY_state}"
+ fi
+ rm -f "${VERIFY_SERIALIZED_FILE}"
+ return $OCF_SUCCESS
+}
+
+ntpstatus_monitor() {
+ if [ $OCF_RESKEY_op_sleep -ne 0 ]; then
+ if [ "$1" = "" ] && [ -f "${VERIFY_SERIALIZED_FILE}" ]; then
+ # two monitor ops have occurred at the same time.
+ # This verifies a condition in pacemaker-execd regression tests.
+ ocf_log err "$VERIFY_SERIALIZED_FILE exists already"
+ ocf_exit_reason "alternate universe collision"
+ return $OCF_ERR_GENERIC
+ fi
+
+ touch "${VERIFY_SERIALIZED_FILE}"
+ sleep ${OCF_RESKEY_op_sleep}
+ rm "${VERIFY_SERIALIZED_FILE}"
+ fi
+
+ if [ -f "${OCF_RESKEY_state}" ]; then
+ ntpstatus_update
+ # Multiple monitor levels are defined to support various tests
+ case "$OCF_CHECK_LEVEL" in
+ 10)
+ # monitor level with delay, useful for testing timeouts
+ sleep 30
+ ;;
+
+ 20)
+ # monitor level that fails intermittently
+ n=$(expr "$(dd if=/dev/urandom bs=1 count=1 2>/dev/null | od | head -1 | cut -f2 -d' ')" % 5)
+ if [ $n -eq 1 ]; then
+ ocf_exit_reason "smoke detected near CPU fan"
+ return $OCF_ERR_GENERIC
+ fi
+ ;;
+
+ 30)
+ # monitor level that always fails
+ ocf_exit_reason "hyperdrive quota reached"
+ return $OCF_ERR_GENERIC
+ ;;
+
+ 40)
+ # monitor level that returns error code from state file
+ rc=$(cat ${OCF_RESKEY_state})
+ [ -n "$rc" ] && ocf_exit_reason "CPU ejected. Observed leaving the Kronosnet galaxy at $rc times the speed of light." && return $rc
+ ;;
+
+ *)
+ ;;
+ esac
+ return $OCF_SUCCESS
+ fi
+ return $OCF_NOT_RUNNING
+}
+
+ntpstatus_validate() {
+ # Is the state directory writable?
+ state_dir=$(dirname "$OCF_RESKEY_state")
+ [ -d "$state_dir" ] && [ -w "$state_dir" ] && [ -x "$state_dir" ]
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_ARGS
+ fi
+
+ # Check the host ip
+ if [ -z "$OCF_RESKEY_host_ip" ]; then
+ ocf_log err "Empty host_ip. Please specify a host to check"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ return $OCF_SUCCESS
+}
+
+: ${OCF_RESKEY_op_sleep:=0}
+: ${OCF_RESKEY_CRM_meta_interval:=0}
+: ${OCF_RESKEY_CRM_meta_globally_unique:="false"}
+: ${OCF_RESKEY_ntpstatus:="ntpstatus"}
+: ${OCF_RESKEY_dampen:=75}
+: ${OCF_RESKEY_multiplier:=1000}
+
+if [ -z "$OCF_RESKEY_state" ]; then
+ OCF_RESKEY_state="${HA_VARRUN%%/}/ntpstatus-${OCF_RESOURCE_INSTANCE}.state"
+
+ if [ "${OCF_RESKEY_CRM_meta_globally_unique}" = "false" ]; then
+ # Strip off the trailing clone marker (note + is not portable in sed)
+ OCF_RESKEY_state=$(echo $OCF_RESKEY_state | sed s/:[0-9][0-9]*\.state/.state/)
+ fi
+fi
+VERIFY_SERIALIZED_FILE="${OCF_RESKEY_state}.serialized"
+
+dump_env
+
+case "$__OCF_ACTION" in
+meta-data) meta_data
+ exit $OCF_SUCCESS
+ ;;
+start) ntpstatus_start;;
+stop) ntpstatus_stop;;
+monitor) ntpstatus_monitor;;
+migrate_to) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} to ${OCF_RESKEY_CRM_meta_migrate_target}."
+ ntpstatus_stop
+ ;;
+migrate_from) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} from ${OCF_RESKEY_CRM_meta_migrate_source}."
+ ntpstatus_start
+ ;;
+reload) ocf_log err "Reloading..."
+ ntpstatus_start
+ ;;
+validate-all) ntpstatus_validate;;
+usage|help) ntpstatus_usage
+ exit $OCF_SUCCESS
+ ;;
+*) ntpstatus_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
+
+# vim: set filetype=sh expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/roles/centos_physical_machine/files/pacemaker_ra/ptpstatus b/roles/centos_physical_machine/files/pacemaker_ra/ptpstatus
new file mode 100755
index 000000000..d3d42ab1e
--- /dev/null
+++ b/roles/centos_physical_machine/files/pacemaker_ra/ptpstatus
@@ -0,0 +1,303 @@
+#!/bin/sh
+#
+# ocf:seapath:ptpstatus resource agent
+#
+# Original copyright 2004 SUSE LINUX AG, Lars Marowsky-Bre
+# Later changes copyright 2008-2019 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# (GPLv2) WITHOUT ANY WARRANTY.
+#
+# crm config example:
+#primitive ptpstatus_test ocf:seapath:ptpstatus \
+# op monitor timeout=10 interval=10
+#clone cl_ptpstatus_test ptpstatus_test \
+# meta target-role=Started
+#location ptp_test_debian debian \
+# rule ptpstatus: defined ptpstatus
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"}
+. "${OCF_FUNCTIONS}"
+: ${__OCF_ACTION:="$1"}
+
+#######################################################################
+
+meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ptpstatus" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">
+Checks the status of the PTP synchronization
+</longdesc>
+<shortdesc lang="en">Checks the status of the PTP synchronization</shortdesc>
+
+<parameters>
+
+<parameter name="state" unique-group="state">
+<longdesc lang="en">
+Location to store the resource state in.
+</longdesc>
+<shortdesc lang="en">State file</shortdesc>
+<content type="string" default="${HA_VARRUN%%/}/ptpstatus-${OCF_RESOURCE_INSTANCE}.state" />
+</parameter>
+
+<parameter name="ptpstatus">
+<longdesc lang="en">
+ptpstatus
+</longdesc>
+<shortdesc lang="en">ptpstatus</shortdesc>
+<content type="string" default="ptpstatus" />
+</parameter>
+
+<parameter name="op_sleep">
+<longdesc lang="en">
+Number of seconds to sleep during operations. This can be used to test how
+the cluster reacts to operation timeouts.
+</longdesc>
+<shortdesc lang="en">Operation sleep duration in seconds.</shortdesc>
+<content type="string" default="0" />
+</parameter>
+
+<parameter name="fail_start_on">
+<longdesc lang="en">
+Start actions will return failure if running on the host specified here, but
+the resource will start successfully anyway (future monitor calls will find it
+running). This can be used to test on-fail=ignore.
+</longdesc>
+<shortdesc lang="en">Report bogus start failure on specified host</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="envfile">
+<longdesc lang="en">
+If this is set, the environment will be dumped to this file for every call.
+</longdesc>
+<shortdesc lang="en">Environment dump file</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="multiplier">
+<longdesc lang="en">
+The number by which to multiply the connectivity (0 or 1) by
+</longdesc>
+<shortdesc lang="en">Value multiplier</shortdesc>
+<content type="string" default="1000" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start"        timeout="20s" />
+<action name="stop"         timeout="20s" />
+<action name="monitor"      timeout="20s" interval="10s" depth="0" />
+<action name="reload"       timeout="20s" />
+<action name="migrate_to"   timeout="20s" />
+<action name="migrate_from" timeout="20s" />
+<action name="validate-all" timeout="20s" />
+<action name="meta-data"    timeout="5s" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+# don't exit on TERM, to test that pacemaker-execd makes sure that we do exit
+trap sigterm_handler TERM
+sigterm_handler() {
+ ocf_log info "They use TERM to bring us down. No such luck."
+
+ # Since we're likely going to get KILLed, clean up any monitor
+ # serialization in progress, so the next probe doesn't return an error.
+ rm -f "${VERIFY_SERIALIZED_FILE}"
+ return
+}
+
+ptpstatus_usage() {
+	cat <<END
+usage: $0 {start|stop|monitor|reload|migrate_to|migrate_from|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+dump_env() {
+	if [ "${OCF_RESKEY_envfile}" != "" ]; then
+		echo "### ${__OCF_ACTION} @ $(date) ### $(env | sort) ###" >> "${OCF_RESKEY_envfile}"
+	fi
+}
+
+ptpstatus_update() {
+ status=`/usr/bin/chronyc -n sources | grep -E "#[*+] PTP0" | wc -l`
+ case "${status#[+]}" in
+ ''|*[!0-9]*)
+ status=0
+ ;;
+ esac
+ status=$(expr $status \* $OCF_RESKEY_multiplier)
+ if [ "$__OCF_ACTION" = "start" ] ; then
+ attrd_updater -n "$OCF_RESKEY_ptpstatus" -B "$status" -d "$OCF_RESKEY_dampen" $attrd_options
+ else
+ attrd_updater -n "$OCF_RESKEY_ptpstatus" -v "$status" -d "$OCF_RESKEY_dampen" $attrd_options
+ fi
+ rc=$?
+ case $rc in
+ 0) #ocf_log info "Updated $OCF_RESKEY_ptpstatus = $status"
+ ;;
+ *) ocf_log warn "Could not update $OCF_RESKEY_ptpstatus = $status: rc=$rc";;
+ esac
+ if [ $rc -ne 0 ]; then
+ return $rc
+ fi
+}
+
+ptpstatus_start() {
+ ptpstatus_monitor
+
+ DS_RETVAL=$?
+ if [ $DS_RETVAL -eq $OCF_SUCCESS ]; then
+ if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then
+ DS_RETVAL=$OCF_ERR_GENERIC
+ fi
+ return $DS_RETVAL
+ fi
+
+ touch "${OCF_RESKEY_state}"
+ DS_RETVAL=$?
+ if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then
+ DS_RETVAL=$OCF_ERR_GENERIC
+ fi
+ ptpstatus_update
+ return $DS_RETVAL
+}
+
+ptpstatus_stop() {
+ ptpstatus_monitor --force
+ attrd_updater -D -n "$OCF_RESKEY_ptpstatus" -d "$OCF_RESKEY_dampen" $attrd_options
+ if [ $? -eq $OCF_SUCCESS ]; then
+ rm "${OCF_RESKEY_state}"
+ fi
+ rm -f "${VERIFY_SERIALIZED_FILE}"
+ return $OCF_SUCCESS
+}
+
+ptpstatus_monitor() {
+ if [ $OCF_RESKEY_op_sleep -ne 0 ]; then
+ if [ "$1" = "" ] && [ -f "${VERIFY_SERIALIZED_FILE}" ]; then
+ # two monitor ops have occurred at the same time.
+ # This verifies a condition in pacemaker-execd regression tests.
+ ocf_log err "$VERIFY_SERIALIZED_FILE exists already"
+ ocf_exit_reason "alternate universe collision"
+ return $OCF_ERR_GENERIC
+ fi
+
+ touch "${VERIFY_SERIALIZED_FILE}"
+ sleep ${OCF_RESKEY_op_sleep}
+ rm "${VERIFY_SERIALIZED_FILE}"
+ fi
+
+ if [ -f "${OCF_RESKEY_state}" ]; then
+ ptpstatus_update
+ # Multiple monitor levels are defined to support various tests
+ case "$OCF_CHECK_LEVEL" in
+ 10)
+ # monitor level with delay, useful for testing timeouts
+ sleep 30
+ ;;
+
+ 20)
+ # monitor level that fails intermittently
+ n=$(expr "$(dd if=/dev/urandom bs=1 count=1 2>/dev/null | od | head -1 | cut -f2 -d' ')" % 5)
+ if [ $n -eq 1 ]; then
+ ocf_exit_reason "smoke detected near CPU fan"
+ return $OCF_ERR_GENERIC
+ fi
+ ;;
+
+ 30)
+ # monitor level that always fails
+ ocf_exit_reason "hyperdrive quota reached"
+ return $OCF_ERR_GENERIC
+ ;;
+
+ 40)
+ # monitor level that returns error code from state file
+ rc=$(cat ${OCF_RESKEY_state})
+ [ -n "$rc" ] && ocf_exit_reason "CPU ejected. Observed leaving the Kronosnet galaxy at $rc times the speed of light." && return $rc
+ ;;
+
+ *)
+ ;;
+ esac
+ return $OCF_SUCCESS
+ fi
+ return $OCF_NOT_RUNNING
+}
+
+ptpstatus_validate() {
+ # Is the state directory writable?
+ state_dir=$(dirname "$OCF_RESKEY_state")
+ [ -d "$state_dir" ] && [ -w "$state_dir" ] && [ -x "$state_dir" ]
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_ARGS
+ fi
+ return $OCF_SUCCESS
+}
+
+: ${OCF_RESKEY_op_sleep:=0}
+: ${OCF_RESKEY_CRM_meta_interval:=0}
+: ${OCF_RESKEY_CRM_meta_globally_unique:="false"}
+: ${OCF_RESKEY_ptpstatus:="ptpstatus"}
+: ${OCF_RESKEY_dampen:=75}
+: ${OCF_RESKEY_multiplier:=1000}
+
+if [ -z "$OCF_RESKEY_state" ]; then
+ OCF_RESKEY_state="${HA_VARRUN%%/}/ptpstatus-${OCF_RESOURCE_INSTANCE}.state"
+
+ if [ "${OCF_RESKEY_CRM_meta_globally_unique}" = "false" ]; then
+ # Strip off the trailing clone marker (note + is not portable in sed)
+ OCF_RESKEY_state=$(echo $OCF_RESKEY_state | sed s/:[0-9][0-9]*\.state/.state/)
+ fi
+fi
+VERIFY_SERIALIZED_FILE="${OCF_RESKEY_state}.serialized"
+
+dump_env
+
+case "$__OCF_ACTION" in
+meta-data) meta_data
+ exit $OCF_SUCCESS
+ ;;
+start) ptpstatus_start;;
+stop) ptpstatus_stop;;
+monitor) ptpstatus_monitor;;
+migrate_to) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} to ${OCF_RESKEY_CRM_meta_migrate_target}."
+ ptpstatus_stop
+ ;;
+migrate_from) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} from ${OCF_RESKEY_CRM_meta_migrate_source}."
+ ptpstatus_start
+ ;;
+reload) ocf_log err "Reloading..."
+ ptpstatus_start
+ ;;
+validate-all) ptpstatus_validate;;
+usage|help) ptpstatus_usage
+ exit $OCF_SUCCESS
+ ;;
+*) ptpstatus_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
+
+# vim: set filetype=sh expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/roles/centos/physical_machine/handlers/main.yml b/roles/centos_physical_machine/handlers/main.yml
similarity index 100%
rename from roles/centos/physical_machine/handlers/main.yml
rename to roles/centos_physical_machine/handlers/main.yml
diff --git a/roles/centos_physical_machine/initramfs-tools/conf.d/rebooter.conf.j2 b/roles/centos_physical_machine/initramfs-tools/conf.d/rebooter.conf.j2
new file mode 100644
index 000000000..5741d2d16
--- /dev/null
+++ b/roles/centos_physical_machine/initramfs-tools/conf.d/rebooter.conf.j2
@@ -0,0 +1,5 @@
+# Device for /var/log storage
+REBOOTER_LOG_DEVICE={{ lvm_rebooter_log_device }}
+
+# Relative path from device root for /var/log
+REBOOTER_LOG_PATH={{ lvm_rebooter_log_path }}
diff --git a/roles/centos_physical_machine/initramfs-tools/scripts/init-bottom/rebooter b/roles/centos_physical_machine/initramfs-tools/scripts/init-bottom/rebooter
new file mode 100755
index 000000000..fa1934f90
--- /dev/null
+++ b/roles/centos_physical_machine/initramfs-tools/scripts/init-bottom/rebooter
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+PREREQ="lvm"
+
+prereqs()
+{
+ echo "$PREREQ"
+}
+
+case $1 in
+prereqs)
+ prereqs
+ exit 0
+ ;;
+esac
+
+. /scripts/functions
+
+# Get REBOOTER_LOG_DEVICE and REBOOTER_LOG_PATH from config
+# Default to SEAPATH default (a dedicated LV called vg1-varlog)
+REBOOTER_LOG_DEVICE=/dev/mapper/vg1-varlog
+REBOOTER_LOG_PATH=.
+if [ -e /conf/conf.d/rebooter.conf ]; then
+ . /conf/conf.d/rebooter.conf
+fi
+
+log_begin_msg "Rebooter starting"
+if [ -e /run/initramfs/do_reboot ]; then
+ _log_msg "Rebooting...\n"
+ MOUNT_POINT="/run/mnt"
+ mkdir -p $MOUNT_POINT
+ mount -o sync,rw $REBOOTER_LOG_DEVICE $MOUNT_POINT
+ LOG_PATH="$MOUNT_POINT/$REBOOTER_LOG_PATH/initramfs.log"
+ echo "== $(date) ==" >> $LOG_PATH
+ dmesg >> $LOG_PATH
+ echo >> $LOG_PATH
+ umount $MOUNT_POINT
+ reboot -f -d 1 # No init to handle reboot => -f
+else
+ _log_msg "(no reboot needed) "
+fi
+log_end_msg
+
+exit 0
diff --git a/roles/centos_physical_machine/initramfs-tools/scripts/init-premount/lvm_snapshot_rebooter b/roles/centos_physical_machine/initramfs-tools/scripts/init-premount/lvm_snapshot_rebooter
new file mode 100755
index 000000000..4a1d8e7fb
--- /dev/null
+++ b/roles/centos_physical_machine/initramfs-tools/scripts/init-premount/lvm_snapshot_rebooter
@@ -0,0 +1,49 @@
+#!/bin/sh
+# LVM snapshot rebooter : wait for any merging LV and reboot once done
+# Use this to work around a GRUB limitation: it does not handle LVs with a merging snapshot
+
+PREREQ="lvm"
+prereqs()
+{
+ echo "$PREREQ"
+}
+
+case $1 in
+prereqs)
+ prereqs
+ exit 0
+ ;;
+esac
+
+. /scripts/functions
+
+if [ ! -x "/sbin/lvm" ]; then
+    panic "lvm executable not found"
+fi
+
+get_lv_snapshot_merging() {
+ # This will print a list of "vg/lv" marked for merging
+ /sbin/lvm lvs --select 'lv_merging!=0' --noheadings --separator '/' -o vg_name,lv_name
+}
+
+log_begin_msg "Starting LVM Snapshot rebooter"
+
+# Wait for LVM elements to appear and be activated by udev
+wait_for_udev 10
+
+lv_merging=$(get_lv_snapshot_merging)
+if [ -n "$lv_merging" ] ; then
+ log_end_msg
+ for lv in $lv_merging; do
+ log_begin_msg " Merging $lv"
+        /sbin/lvm lvchange --sysinit -ay "$lv" # lvmpolld/dmeventd not yet available
+ /sbin/lvm lvpoll --polloperation merge --interval 1 --config activation/monitoring=0 "$lv"
+ log_end_msg
+ done
+ log_success_msg "Snapshot merging complete, will reboot..."
+ touch /run/initramfs/do_reboot
+else
+ log_success_msg "Done. (No LVM snapshot need merging)"
+fi
+
+exit 0
diff --git a/roles/centos_physical_machine/initramfs-tools/scripts/init-top/init_log b/roles/centos_physical_machine/initramfs-tools/scripts/init-top/init_log
new file mode 100755
index 000000000..297e8e3d0
--- /dev/null
+++ b/roles/centos_physical_machine/initramfs-tools/scripts/init-top/init_log
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+prereqs()
+{
+ echo "$PREREQ"
+}
+
+case $1 in
+prereqs)
+ prereqs
+ exit 0
+ ;;
+esac
+
+. /scripts/functions
+
+# NOTE(review): heredoc reconstructed from a garbled patch line - verify against upstream
+cat >> /conf/param.conf <<EOF
+if [ -z "\$INIT_LOG_DONE" ]; then
+    mkfifo /run/initramfs/tempfifo
+    awk '{ print "init: " \$0}' < /run/initramfs/tempfifo > /dev/kmsg &
+    exec > /run/initramfs/tempfifo 2>&1
+ rm /run/initramfs/tempfifo
+
+ INIT_LOG_DONE=done
+fi
+EOF
+exit 0
diff --git a/roles/centos_physical_machine/tasks/main.yml b/roles/centos_physical_machine/tasks/main.yml
new file mode 100644
index 000000000..cea1c4c03
--- /dev/null
+++ b/roles/centos_physical_machine/tasks/main.yml
@@ -0,0 +1,256 @@
+# Copyright (C) 2024 RTE
+# Copyright (C) 2024 Red Hat, Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+- name: Copy sysctl rules
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: /etc/sysctl.d/{{ item }}
+ mode: '0644'
+ with_items:
+ - 00-bridge_nf_call.conf
+ register: sysctl1
+
+- name: Add sysctl conf from inventory (extra_sysctl_physical_machines)
+ ansible.builtin.copy:
+ dest: /etc/sysctl.d/00-seapathextra_physicalmachines.conf
+ mode: '0644'
+ content: "{{ extra_sysctl_physical_machines }}"
+ when: extra_sysctl_physical_machines is defined
+ register: sysctl2
+
+- name: restart systemd-sysctl if needed
+ ansible.builtin.systemd:
+ name: systemd-sysctl.service
+ state: restarted
+ when: sysctl1.changed or sysctl2.changed
+
+- name: create src folder on hosts
+ file:
+ path: /tmp/src
+ state: directory
+ mode: '0755'
+
+- name: temp fix for synchronize to force evaluate variables
+ set_fact:
+ ansible_host: "{{ ansible_host }}"
+
+- name: deploy vm_manager
+ include_role:
+ name: deploy_vm_manager
+
+- name: deploy python3-setup-ovs
+ include_role:
+ name: deploy_python3_setup_ovs
+
+- name: Copy consolevm.sh
+ template:
+ src: consolevm.sh.j2
+ dest: /usr/local/bin/consolevm
+ mode: '0755'
+
+- name: create /usr/lib/ocf/resource.d/seapath on hosts
+ file:
+ path: /usr/lib/ocf/resource.d/seapath
+ state: directory
+ mode: '0755'
+
+- name: Copy Pacemaker Seapath Resource-Agent files
+ ansible.posix.synchronize:
+ src: pacemaker_ra/
+ dest: /usr/lib/ocf/resource.d/seapath/
+ rsync_opts:
+ - "--chmod=F755"
+ - "--chown=root:root"
+
+- name: Copy chrony-wait.service
+ template:
+ src: chrony-wait.service.j2
+ dest: /etc/systemd/system/chrony-wait.service
+ owner: root
+ group: root
+ mode: '0644'
+ register: chronywait
+- name: daemon-reload chrony-wait.service
+ ansible.builtin.service:
+ daemon_reload: yes
+ when: chronywait.changed
+- name: enable chrony-wait.service
+ ansible.builtin.systemd:
+ name: chrony-wait.service
+ enabled: yes
+
+- name: Create libvirtd.service.d directory
+ file:
+ path: /etc/systemd/system/libvirtd.service.d/
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+- name: Create pacemaker.service.d directory
+ file:
+ path: /etc/systemd/system/pacemaker.service.d/
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+- name: Copy pacemaker.service drop-in
+ template:
+ src: pacemaker_override.conf.j2
+ dest: /etc/systemd/system/pacemaker.service.d/override.conf
+ owner: root
+ group: root
+ mode: 0644
+ notify: daemon-reload
+ register: pacemaker_corosync
+- name: Get Pacemaker service Status
+ ansible.builtin.systemd:
+ name: "pacemaker.service"
+ register: pacemaker_service_status
+- name: disable pacemaker (reinstall step 1/2)
+ ansible.builtin.systemd:
+ name: pacemaker.service
+ enabled: no
+ when: pacemaker_corosync.changed and pacemaker_service_status.status.UnitFileState == "enabled"
+- name: enable pacemaker (reinstall step 2/2)
+ ansible.builtin.systemd:
+ name: pacemaker.service
+ enabled: yes
+ when: pacemaker_corosync.changed and pacemaker_service_status.status.UnitFileState == "enabled"
+
+- name: Add extra modules to the kernel
+ block:
+ - name: create extra modules conf file
+ ansible.builtin.file:
+ path: /etc/modules-load.d/extra_modules.conf
+ owner: root
+ group: root
+ mode: 0751
+ state: touch
+ - name: add extra modules to conf file
+ lineinfile:
+ dest: /etc/modules-load.d/extra_modules.conf
+ state: present
+ regexp: "^{{ item }}$"
+ line: "{{ item }}"
+ with_items: "{{ extra_kernel_modules | default([]) }}"
+
+- name: Add admin user to libvirt group
+ user:
+ name: "{{ admin_user }}"
+ groups: libvirt
+ append: yes
+
+
+- name: Creating libvirt user with libvirtd permissions
+ user: name=libvirt
+ group=libvirt
+ shell=/bin/false
+
+- name: add br_netfilter to /etc/modules-load.d
+ ansible.builtin.copy:
+ src: modules/netfilter.conf
+ dest: /etc/modules-load.d/netfilter.conf
+ owner: root
+ group: root
+ mode: 0751
+
+- name: lineinfile in hosts file for logstash-seapath
+ lineinfile:
+ dest: /etc/hosts
+ regexp: '.* logstash-seapath$'
+ line: "{{ logstash_server_ip }} logstash-seapath"
+ state: present
+ when: logstash_server_ip is defined
+
+- name: Make libvirt use the "machine-id" way to determine host UUID
+ lineinfile:
+ dest: /etc/libvirt/libvirtd.conf
+ regexp: "^#?host_uuid_source ="
+ line: "host_uuid_source = \"machine-id\""
+ state: present
+- name: restart libvirtd
+ ansible.builtin.systemd:
+ name: libvirtd.service
+ state: restarted
+
+- name: enable virtsecretd
+ ansible.builtin.systemd:
+ name: virtsecretd.service
+ enabled: yes
+ state: started
+
+- name: enable docker.service
+  ansible.builtin.systemd: { name: docker.service, enabled: true }
+
+- name: "add initramfs-tools scripts: script file (LVM rebooter and log handling)"
+ ansible.builtin.copy:
+ src: initramfs-tools/scripts/
+ dest: /etc/initramfs-tools/scripts/
+ mode: '0755'
+ register: initramfs_tools_scripts
+
+- name: "get the /var/log/ device"
+ command: "findmnt -n -o SOURCE --target /var/log"
+ register: varlog_dev
+
+- name: "set_fact /var/log/ device"
+ set_fact:
+ lvm_rebooter_log_device: "{{ varlog_dev.stdout }}"
+
+- name: "get the /var/log/ relative path"
+ shell: "realpath --relative-to=$(findmnt -n -o TARGET --target /var/log/) /var/log"
+ register: varlog_path
+
+- name: "set_fact /var/log/ relative path"
+ set_fact:
+ lvm_rebooter_log_path: "{{ varlog_path.stdout }}"
+
+- name: "Copy rebooter.conf"
+ template:
+ src: initramfs-tools/conf.d/rebooter.conf.j2
+ dest: /etc/dracut.conf.d/rebooter.conf
+
+- name: "configure initramfs-tools to use busybox"
+ lineinfile:
+ dest: /etc/dracut.conf
+ regexp: "^#?BUSYBOX="
+ line: "BUSYBOX=y"
+ state: present
+ register: initramfs_busybox
+
+- name: "rebuild initramfs if necessary"
+ command:
+ cmd: /usr/bin/dracut --regenerate-all --force
+ when: initramfs_tools_scripts.changed or initramfs_busybox.changed
+
+- name: "add rbd type to lvm.conf"
+ ansible.builtin.lineinfile:
+ path: /etc/lvm/lvm.conf
+ insertafter: 'devices {'
+ line: " types = [ \"rbd\", 1024 ]"
+ state: present
+
+- name: "Configure firewalld for ceph"
+ block:
+ - name: "Configure firewalld for ceph monitor"
+ ansible.posix.firewalld:
+ port: 3300/tcp
+ permanent: true
+ state: enabled
+ - name: "Configure firewalld for ceph monitor legacy v1 port"
+ ansible.posix.firewalld:
+ port: 6789/tcp
+ permanent: true
+ state: enabled
+ - name: "Configure firewalld for ceph OSD"
+ ansible.posix.firewalld:
+ port: 6800-7300/tcp
+ permanent: true
+ state: enabled
+
+- name: "Configure firewalld for high-availability services"
+ ansible.posix.firewalld:
+ service: high-availability
+ permanent: true
+ state: enabled
diff --git a/roles/centos_physical_machine/templates/chrony-wait.service.j2 b/roles/centos_physical_machine/templates/chrony-wait.service.j2
new file mode 100644
index 000000000..ecc1edf2a
--- /dev/null
+++ b/roles/centos_physical_machine/templates/chrony-wait.service.j2
@@ -0,0 +1,46 @@
+[Unit]
+Description=Wait for chrony to synchronize system clock
+Documentation=man:chronyc(1)
+After=timemaster.service
+Requires=timemaster.service
+Before=time-sync.target
+Wants=time-sync.target
+
+[Service]
+Type=oneshot
+# Wait for chronyd to update the clock and the remaining
+# correction to be less than 0.1 seconds
+ExecStart=/usr/bin/chronyc -h 127.0.0.1,::1 waitsync 0 0.1 0.0 1
+# Wait for at most chrony_wait_timeout_sec seconds
+TimeoutStartSec={{ chrony_wait_timeout_sec | default(180) }}
+RemainAfterExit=yes
+StandardOutput=null
+
+CapabilityBoundingSet=
+DevicePolicy=closed
+DynamicUser=yes
+IPAddressAllow=localhost
+IPAddressDeny=any
+LockPersonality=yes
+MemoryDenyWriteExecute=yes
+PrivateDevices=yes
+PrivateUsers=yes
+ProtectClock=yes
+ProtectControlGroups=yes
+ProtectHome=yes
+ProtectHostname=yes
+ProtectKernelLogs=yes
+ProtectKernelModules=yes
+ProtectKernelTunables=yes
+ProtectProc=invisible
+ProtectSystem=strict
+RestrictAddressFamilies=AF_INET AF_INET6
+RestrictNamespaces=yes
+RestrictRealtime=yes
+SystemCallArchitectures=native
+SystemCallFilter=@system-service
+SystemCallFilter=~@privileged @resources
+UMask=0777
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/centos_physical_machine/templates/consolevm.sh.j2 b/roles/centos_physical_machine/templates/consolevm.sh.j2
new file mode 100644
index 000000000..a9fddd4ff
--- /dev/null
+++ b/roles/centos_physical_machine/templates/consolevm.sh.j2
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+vm=$1
+if [ -z "$vm" ]
+then
+ echo no vm defined
+else
+ echo "you want a console to $vm, ok let's go"
+ hyp=`crm status | grep -E "^ \* $vm\b" | grep Started | awk 'NF>1{print $NF}'`
+    if [ -z "$hyp" ]
+ then
+ echo no hypervisor found for this vm
+ else
+ echo "$vm is running on $hyp, let's connect !"
+ virsh --connect qemu+ssh://{{ admin_user }}@$hyp/system console $vm
+ fi
+fi
diff --git a/roles/centos_physical_machine/templates/pacemaker_override.conf.j2 b/roles/centos_physical_machine/templates/pacemaker_override.conf.j2
new file mode 100644
index 000000000..554df2a3e
--- /dev/null
+++ b/roles/centos_physical_machine/templates/pacemaker_override.conf.j2
@@ -0,0 +1,10 @@
+[Unit]
+Wants=libvirtd.service
+After=libvirtd.service
+
+[Install]
+WantedBy=corosync.service
+
+[Service]
+TimeoutStopSec={{ pacemaker_shutdown_timeout | default("2min") }}
+TimeoutStartSec=60s
diff --git a/roles/debian/tasks/main.yml b/roles/debian/tasks/main.yml
index c1b78f20c..480db0861 100644
--- a/roles/debian/tasks/main.yml
+++ b/roles/debian/tasks/main.yml
@@ -245,3 +245,14 @@
- name: update-grub
command: update-grub
when: updategrub0.changed or updategrub1.changed or updategrub2.changed
+
+- name: Stop and disable chrony
+ ansible.builtin.systemd:
+ name: "chrony"
+ state: stopped
+ enabled: false
+- name: Stop and disable systemd-resolved
+ ansible.builtin.systemd:
+ name: systemd-resolved
+ state: stopped
+ enabled: false
diff --git a/roles/debian/hardening/handlers/main.yml b/roles/debian_hardening/handlers/main.yml
similarity index 100%
rename from roles/debian/hardening/handlers/main.yml
rename to roles/debian_hardening/handlers/main.yml
diff --git a/roles/debian/hardening/tasks/main.yml b/roles/debian_hardening/tasks/main.yml
similarity index 100%
rename from roles/debian/hardening/tasks/main.yml
rename to roles/debian_hardening/tasks/main.yml
diff --git a/roles/debian/hardening/templates/01_password.j2 b/roles/debian_hardening/templates/01_password.j2
similarity index 100%
rename from roles/debian/hardening/templates/01_password.j2
rename to roles/debian_hardening/templates/01_password.j2
diff --git a/roles/debian/hardening/vars/main.yml b/roles/debian_hardening/vars/main.yml
similarity index 100%
rename from roles/debian/hardening/vars/main.yml
rename to roles/debian_hardening/vars/main.yml
diff --git a/roles/debian/hardening/physical_machine/tasks/main.yml b/roles/debian_hardening_physical_machine/tasks/main.yml
similarity index 100%
rename from roles/debian/hardening/physical_machine/tasks/main.yml
rename to roles/debian_hardening_physical_machine/tasks/main.yml
diff --git a/roles/debian/hardening/physical_machine/vars/main.yml b/roles/debian_hardening_physical_machine/vars/main.yml
similarity index 100%
rename from roles/debian/hardening/physical_machine/vars/main.yml
rename to roles/debian_hardening_physical_machine/vars/main.yml
diff --git a/roles/debian/hypervisor/tasks/main.yml b/roles/debian_hypervisor/tasks/main.yml
similarity index 100%
rename from roles/debian/hypervisor/tasks/main.yml
rename to roles/debian_hypervisor/tasks/main.yml
diff --git a/roles/debian/physical_machine/handlers/main.yml b/roles/debian_physical_machine/handlers/main.yml
similarity index 100%
rename from roles/debian/physical_machine/handlers/main.yml
rename to roles/debian_physical_machine/handlers/main.yml
diff --git a/roles/debian/physical_machine/tasks/main.yml b/roles/debian_physical_machine/tasks/main.yml
similarity index 89%
rename from roles/debian/physical_machine/tasks/main.yml
rename to roles/debian_physical_machine/tasks/main.yml
index 4ef3e6a17..81f65e5c9 100644
--- a/roles/debian/physical_machine/tasks/main.yml
+++ b/roles/debian_physical_machine/tasks/main.yml
@@ -167,46 +167,13 @@
set_fact:
ansible_host: "{{ ansible_host }}"
-- name: Synchronization of src python3-setup-ovs on the control machine to dest on the remote hosts
- ansible.posix.synchronize:
- src: ../src/debian/python3-setup-ovs
- dest: /tmp/src
- rsync_opts:
- - "--chown=root:root"
-- name: Install python3-setup-ovs
- command:
- cmd: /usr/bin/python3 setup.py install
- chdir: /tmp/src/python3-setup-ovs
-- name: Copy seapath-config_ovs.service
- ansible.builtin.copy:
- src: ../src/debian/seapath-config_ovs.service
- dest: /etc/systemd/system/seapath-config_ovs.service
- mode: '0644'
- register: seapathconfigovs
-- name: daemon-reload seapath-config_ovs
- ansible.builtin.service:
- daemon_reload: yes
- when: seapathconfigovs.changed
-- name: enable seapath-config_ovs.service
- ansible.builtin.systemd:
- name: seapath-config_ovs.service
- enabled: yes
+- name: deploy vm_manager
+ include_role:
+ name: deploy_vm_manager
-- name: Synchronization of src vm_manager on the control machine to dest on the remote hosts
- ansible.posix.synchronize:
- src: ../src/debian/vm_manager
- dest: /tmp/src
- rsync_opts:
- - "--chown=root:root"
-- name: Install vm_manager
- command:
- cmd: /usr/bin/python3 setup.py install
- chdir: /tmp/src/vm_manager
-- name: Create a symbolic link
- ansible.builtin.file:
- src: /usr/local/bin/vm_manager_cmd.py
- dest: /usr/local/bin/vm-mgr
- state: link
+- name: deploy python3-setup-ovs
+ include_role:
+ name: deploy_python3_setup_ovs
- name: Copy consolevm.sh
template:
diff --git a/roles/deploy_cukinia/README.md b/roles/deploy_cukinia/README.md
new file mode 100644
index 000000000..5abd0b530
--- /dev/null
+++ b/roles/deploy_cukinia/README.md
@@ -0,0 +1,24 @@
+# Deploy cukinia Role
+
+This role deploys the cukinia utility
+
+## Requirements
+
+no requirement.
+
+## Role Variables
+
+no variable.
+
+## Example Playbook
+
+```yaml
+- name: deploy cukinia
+ hosts:
+ - cluster_machines
+ - standalone_machine
+ - VMs
+ become: true
+ roles:
+ - { role: seapath_ansible.deploy_cukinia }
+```
diff --git a/src/cukinia b/roles/deploy_cukinia/files/cukinia
similarity index 100%
rename from src/cukinia
rename to roles/deploy_cukinia/files/cukinia
diff --git a/roles/deploy_cukinia/meta/main.yml b/roles/deploy_cukinia/meta/main.yml
new file mode 100644
index 000000000..1f387619d
--- /dev/null
+++ b/roles/deploy_cukinia/meta/main.yml
@@ -0,0 +1,11 @@
+---
+galaxy_info:
+ author: "RTE"
+ description: deploys cukinia
+ license: Apache-2.0
+ min_ansible_version: 2.9.10
+ platforms:
+ - name: Debian
+ versions:
+ - all
+dependencies: []
diff --git a/roles/deploy_cukinia/tasks/main.yml b/roles/deploy_cukinia/tasks/main.yml
new file mode 100644
index 000000000..01e97da3a
--- /dev/null
+++ b/roles/deploy_cukinia/tasks/main.yml
@@ -0,0 +1,19 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+- name: Copy Cukinia script
+ copy:
+ src: cukinia/cukinia
+ dest: /usr/local/bin/cukinia
+ mode: '0755'
+- name: Use Bash as Cukinia's shell
+ replace:
+ path: /usr/local/bin/cukinia
+ regexp: '^#!/bin/sh$'
+ replace: "#!/usr/bin/env bash"
+- name: "Patch cukinia: CAT"
+ lineinfile:
+ path: /usr/local/bin/cukinia
+ line: "local CAT=zcat"
+ insertafter: 'local line=""'
diff --git a/roles/deploy_cukinia_tests/README.md b/roles/deploy_cukinia_tests/README.md
new file mode 100644
index 000000000..7dcbb2f77
--- /dev/null
+++ b/roles/deploy_cukinia_tests/README.md
@@ -0,0 +1,24 @@
+# Deploy cukinia tests Role
+
+This role deploys the cukinia tests
+
+## Requirements
+
+no requirement.
+
+## Role Variables
+
+- filter (can be "none", "hosts", "observers" or "guests")
+
+## Example Playbook
+
+```yaml
+- name: deploy cukinia tests
+ hosts:
+ - cluster_machines
+ - standalone_machine
+ - VMs
+ become: true
+ roles:
+ - { role: seapath_ansible.deploy_cukinia_tests, filter: "none" }
+```
diff --git a/roles/deploy_cukinia_tests/cukinia-tests b/roles/deploy_cukinia_tests/cukinia-tests
new file mode 160000
index 000000000..ac714c47e
--- /dev/null
+++ b/roles/deploy_cukinia_tests/cukinia-tests
@@ -0,0 +1 @@
+Subproject commit ac714c47eeb220b443dd8ffd2d2dad435bbc5352
diff --git a/roles/deploy_cukinia_tests/meta/main.yml b/roles/deploy_cukinia_tests/meta/main.yml
new file mode 100644
index 000000000..afa3d2363
--- /dev/null
+++ b/roles/deploy_cukinia_tests/meta/main.yml
@@ -0,0 +1,11 @@
+---
+galaxy_info:
+ author: "RTE"
+ description: deploys cukinia tests
+ license: Apache-2.0
+ min_ansible_version: 2.9.10
+ platforms:
+ - name: Debian
+ versions:
+ - all
+dependencies: []
diff --git a/roles/deploy_cukinia_tests/tasks/main.yml b/roles/deploy_cukinia_tests/tasks/main.yml
new file mode 100644
index 000000000..9ce6df800
--- /dev/null
+++ b/roles/deploy_cukinia_tests/tasks/main.yml
@@ -0,0 +1,79 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+- name: Copy Cukinia's tests
+ synchronize:
+ src: cukinia-tests/cukinia
+ dest: /etc/
+ delete: true
+ rsync_opts:
+ - "--exclude=*.j2"
+- name: Copy Cukinia's tests templates
+ template:
+ src: cukinia-tests/cukinia/{{ item.src }}
+ dest: /etc/cukinia/{{ item.dest }}
+ with_items:
+ - { src: 'common_security_tests.d/apt.conf.j2',
+ dest: 'common_security_tests.d/apt.conf' }
+ - { src: 'hypervisor_security_tests.d/shadow.conf.j2',
+ dest: 'hypervisor_security_tests.d/shadow.conf' }
+ - { src: 'hypervisor_security_tests.d/passwd.conf.j2',
+ dest: 'hypervisor_security_tests.d/passwd.conf' }
+ - { src: 'hypervisor_security_tests.d/groups.conf.j2',
+ dest: 'hypervisor_security_tests.d/groups.conf' }
+ - { src: 'common_security_tests.d/sudo.conf.j2',
+ dest: 'common_security_tests.d/sudo.conf' }
+- name: Create /usr/share/cukinia/includes
+ file:
+ path: /usr/share/cukinia/includes
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+- name: Copy Cukinia's includes
+ copy:
+ src: cukinia-tests/includes/
+ dest: /usr/share/cukinia/includes/
+- name: Create /usr/share/testdata
+ file:
+ path: /usr/share/testdata
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+
+- name: tasks only on hosts
+ block:
+ - name: Copy vm.xml
+ copy:
+ src: cukinia-tests/vm_manager_testdata/vm.xml
+ dest: /usr/share/testdata
+ - name: Copy wrong_vm_config.xml
+ copy:
+ src: cukinia-tests/vm_manager_testdata/wrong_vm_config.xml
+ dest: /usr/share/testdata
+ - name: Create a symlink cukinia.conf to cukinia-hypervisor.conf
+ file:
+ src: /etc/cukinia/cukinia-hypervisor.conf
+ dest: /etc/cukinia/cukinia.conf
+ state: link
+ when: "'hypervisors' in group_names"
+
+- name: Create /etc/cukinia.conf for observers
+ block:
+ - name: Create a symlink cukinia.conf to cukinia-observer.conf
+ file:
+ src: /etc/cukinia/cukinia-observer.conf
+ dest: /etc/cukinia/cukinia.conf
+ state: link
+ when: "'observers' in group_names"
+
+- name: Create /etc/cukinia.conf for VMs
+ block:
+ - name: Create a symlink cukinia.conf to cukinia-observer.conf
+ file:
+ src: /etc/cukinia/cukinia-observer.conf
+ dest: /etc/cukinia/cukinia.conf
+ state: link
+ when: "'VMs' in group_names"
diff --git a/roles/deploy_python3_setup_ovs/README.md b/roles/deploy_python3_setup_ovs/README.md
new file mode 100644
index 000000000..d042c071c
--- /dev/null
+++ b/roles/deploy_python3_setup_ovs/README.md
@@ -0,0 +1,21 @@
+# Deploy python3-setup-ovs Role
+
+deploys python3-setup-ovs (seapath-config_ovs)
+
+## Requirements
+
+no requirement.
+
+## Role Variables
+
+no variable.
+
+## Example Playbook
+
+```yaml
+- name: Deploy python3-setup-ovs
+ hosts: cluster_machines
+ become: true
+ roles:
+    - { role: seapath_ansible.deploy_python3_setup_ovs }
+```
diff --git a/src/debian/python3-setup-ovs b/roles/deploy_python3_setup_ovs/files/python3-setup-ovs
similarity index 100%
rename from src/debian/python3-setup-ovs
rename to roles/deploy_python3_setup_ovs/files/python3-setup-ovs
diff --git a/src/debian/seapath-config_ovs.service b/roles/deploy_python3_setup_ovs/files/seapath-config_ovs.service
similarity index 100%
rename from src/debian/seapath-config_ovs.service
rename to roles/deploy_python3_setup_ovs/files/seapath-config_ovs.service
diff --git a/roles/deploy_python3_setup_ovs/meta/main.yml b/roles/deploy_python3_setup_ovs/meta/main.yml
new file mode 100644
index 000000000..72d9eb140
--- /dev/null
+++ b/roles/deploy_python3_setup_ovs/meta/main.yml
@@ -0,0 +1,12 @@
+---
+galaxy_info:
+ author: "RTE"
+ description: deploys python3-setup-ovs (seapath-config_ovs)
+ license: Apache-2.0
+ min_ansible_version: 2.9.10
+ platforms:
+ - name: Debian
+ versions:
+ - all
+dependencies: []
+
diff --git a/roles/deploy_python3_setup_ovs/tasks/main.yml b/roles/deploy_python3_setup_ovs/tasks/main.yml
new file mode 100644
index 000000000..1f6f0649b
--- /dev/null
+++ b/roles/deploy_python3_setup_ovs/tasks/main.yml
@@ -0,0 +1,28 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+- name: Synchronization of src python3-setup-ovs on the control machine to dest on the remote hosts
+ ansible.posix.synchronize:
+ src: python3-setup-ovs
+ dest: /tmp/src
+ rsync_opts:
+ - "--chown=root:root"
+- name: Install python3-setup-ovs
+ command:
+ cmd: /usr/bin/python3 setup.py install
+ chdir: /tmp/src/python3-setup-ovs
+- name: Copy seapath-config_ovs.service
+ ansible.builtin.copy:
+ src: seapath-config_ovs.service
+ dest: /etc/systemd/system/seapath-config_ovs.service
+ mode: '0644'
+ register: seapathconfigovs
+- name: daemon-reload seapath-config_ovs
+ ansible.builtin.service:
+ daemon_reload: yes
+ when: seapathconfigovs.changed
+- name: enable seapath-config_ovs.service
+ ansible.builtin.systemd:
+ name: seapath-config_ovs.service
+ enabled: yes
diff --git a/roles/deploy_vm_manager/README.md b/roles/deploy_vm_manager/README.md
new file mode 100644
index 000000000..983861d0d
--- /dev/null
+++ b/roles/deploy_vm_manager/README.md
@@ -0,0 +1,21 @@
+# Deploy vm_manager Role
+
+This role deploys the vm_manager utility
+
+## Requirements
+
+no requirement.
+
+## Role Variables
+
+no variable.
+
+## Example Playbook
+
+```yaml
+- name: deploy vm_manager
+ hosts: cluster_machines
+ become: true
+ roles:
+ - { role: seapath_ansible.deploy_vm_manager }
+```
diff --git a/src/debian/vm_manager b/roles/deploy_vm_manager/files/vm_manager
similarity index 100%
rename from src/debian/vm_manager
rename to roles/deploy_vm_manager/files/vm_manager
diff --git a/roles/deploy_vm_manager/meta/main.yml b/roles/deploy_vm_manager/meta/main.yml
new file mode 100644
index 000000000..522eba174
--- /dev/null
+++ b/roles/deploy_vm_manager/meta/main.yml
@@ -0,0 +1,11 @@
+---
+galaxy_info:
+ author: "RTE"
+ description: deploys vm_manager
+ license: Apache-2.0
+ min_ansible_version: 2.9.10
+ platforms:
+ - name: Debian
+ versions:
+ - all
+dependencies: []
diff --git a/roles/deploy_vm_manager/tasks/main.yml b/roles/deploy_vm_manager/tasks/main.yml
new file mode 100644
index 000000000..5395c0058
--- /dev/null
+++ b/roles/deploy_vm_manager/tasks/main.yml
@@ -0,0 +1,19 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+- name: Synchronization of src vm_manager on the control machine to dest on the remote hosts
+ ansible.posix.synchronize:
+ src: vm_manager
+ dest: /tmp/src
+ rsync_opts:
+ - "--chown=root:root"
+- name: Install vm_manager
+ command:
+ cmd: /usr/bin/python3 setup.py install
+ chdir: /tmp/src/vm_manager
+- name: Create a symbolic link
+ ansible.builtin.file:
+ src: /usr/local/bin/vm_manager_cmd.py
+ dest: /usr/local/bin/vm-mgr
+ state: link
diff --git a/roles/timemaster/README.md b/roles/timemaster/README.md
new file mode 100644
index 000000000..fbcd591e5
--- /dev/null
+++ b/roles/timemaster/README.md
@@ -0,0 +1,21 @@
+# Timemaster Role
+
+This role configures timemaster
+
+## Requirements
+
+no requirement.
+
+## Role Variables
+
+no variable.
+
+## Example Playbook
+
+```yaml
+- name: Configure Timemaster
+ hosts: cluster_machines
+ become: true
+ roles:
+ - { role: seapath_ansible.timemaster }
+```
diff --git a/roles/timemaster/meta/main.yml b/roles/timemaster/meta/main.yml
new file mode 100644
index 000000000..56cf3fe19
--- /dev/null
+++ b/roles/timemaster/meta/main.yml
@@ -0,0 +1,13 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+---
+galaxy_info:
+ author: "Seapath"
+ description: configures timemaster
+ license: Apache-2.0
+ min_ansible_version: 2.9.10
+ platforms:
+ - name: Debian
+ versions:
+ - all
+dependencies: []
diff --git a/roles/timemaster/tasks/main.yml b/roles/timemaster/tasks/main.yml
new file mode 100644
index 000000000..1c0e9daca
--- /dev/null
+++ b/roles/timemaster/tasks/main.yml
@@ -0,0 +1,54 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+- include_vars: "{{ ansible_distribution }}.yml"
+
+- name: Populate service facts
+ service_facts:
+- name: stop and disable systemd-timesyncd if it exists
+ service:
+ name: "systemd-timesyncd"
+ state: stopped
+ enabled: false
+ when: "'systemd-timesyncd.service' in services"
+- name: Create timemaster configuration
+ template:
+ src: timemaster.conf.j2
+ dest: "{{ path_timemaster_conf }}"
+ register: timemasterconf1
+- name: comment pool configuration in chrony.conf
+ replace:
+ path: "{{ path_chrony_conf }}"
+ regexp: '^(pool .*)'
+ replace: '#\1'
+ register: timemasterconf2
+- name: Create timemaster.service.d directory
+ file:
+ path: /etc/systemd/system/timemaster.service.d/
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+- name: Copy timemaster.service override
+ template:
+ src: timemaster.service.j2
+ dest: /etc/systemd/system/timemaster.service.d/override.conf
+ register: timemasterconf3
+- name: Enable timemaster
+ service:
+ name: "timemaster"
+ enabled: true
+- name: restart timemaster if necessary
+ service:
+ name: "timemaster"
+ state: restarted
+ enabled: true
+ daemon_reload: true
+ when:
+ - timemasterconf1.changed or timemasterconf2.changed or timemasterconf3.changed
+- name: stop and disable chrony
+ service:
+ name: "{{ service_name_chrony }}"
+ state: stopped
+ enabled: false
diff --git a/roles/timemaster/templates/timemaster.conf.j2 b/roles/timemaster/templates/timemaster.conf.j2
new file mode 100644
index 000000000..95e5391a5
--- /dev/null
+++ b/roles/timemaster/templates/timemaster.conf.j2
@@ -0,0 +1,77 @@
+# Configuration file for timemaster
+
+{% if ptp_interface is defined %}
+[ptp_domain 0]
+ntp_options poll 0
+{% if ptp_vlanid is defined %}
+interfaces {{ ptp_interface + '.' + ptp_vlanid|string }}
+{% else %}
+interfaces {{ ptp_interface }}
+{% endif %}
+delay 1e-9
+# delay:
+# This option sets the NTP delay of the source (in seconds). Half of this value
+# is included in the maximum assumed error which is used in the source selection
+# algorithm. Increasing the delay is useful to avoid having no majority in the
+# source selection or to make it prefer other sources.
+# The default is 1e-9 (1 nanosecond).
+# Leave as default to leave the error be computed via the std dev of measured samples.
+{% endif %}
+
+[timemaster]
+ntp_program chronyd
+
+[chrony.conf]
+include {{ path_chrony_conf }}
+{% if ntp_servers is defined %}
+{%- for line in ntp_servers %}{% if ptp_interface is not defined and loop.index==1 %}server {{ line }} trust iburst maxsamples 10 minsamples 10 maxpoll 6 minpoll 6
+{% else %}server {{ line }} iburst maxsamples 10 minsamples 10 maxpoll 6 minpoll 6
+{% endif %}{% endfor %}
+{% endif %}
+
+[ntp.conf]
+includefile /etc/ntp.conf
+
+[ptp4l.conf]
+slaveOnly 1
+
+# IEC 61850-9-3 Profile
+# (from : https://en.wikipedia.org/wiki/IEC/IEEE_61850-9-3)
+network_transport {{ ptp_network_transport | default('L2') }}
+delay_mechanism {{ ptp_delay_mechanism | default('P2P') }}
+domainNumber 0
+
+# Announce interval: 1s
+logAnnounceInterval 0
+
+# Sync interval: 1 s
+logSyncInterval 0
+
+# Pdelay interval: 1 s
+logMinPdelayReqInterval 0
+operLogPdelayReqInterval 0
+
+# Announce receipt time-out: 3 s (fixed)
+announceReceiptTimeout 3
+
+# Slave-only priority :
+priority1 255
+priority2 255
+# Default clock class : any specialised clock will be better (ie a GPS Grand Master Clock)
+clockClass 248
+
+[chronyd]
+path /usr/sbin/chronyd
+
+[ntpd]
+path /usr/sbin/ntpd
+options -u ntp:ntp -g
+
+[phc2sys]
+path /usr/sbin/phc2sys
+
+[ptp4l]
+path /usr/sbin/ptp4l
+# -l 4: "Reasonable" loglevel (synchronisation status change, init, ...)
+# -l 6: debug loglevel (PHC regulation values, offset every second)
+options --step_threshold 0.00001 -l 4
diff --git a/roles/timemaster/templates/timemaster.service.j2 b/roles/timemaster/templates/timemaster.service.j2
new file mode 100644
index 000000000..a5a8243ef
--- /dev/null
+++ b/roles/timemaster/templates/timemaster.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+{% if ptp_interface is defined %}{% if ptp_vlanid is defined %}
+ExecStartPre=bash -c "while true; do ip addr show {{ ptp_interface + '.' + ptp_vlanid|string }} && break; sleep 1; done"
+{% else %}
+After=sys-subsystem-net-devices-{{ ptp_interface }}.device
+ExecStartPre=bash -c "while true; do ip addr show {{ ptp_interface }} && break; sleep 1; done"
+{% endif %}
+{% endif %}
diff --git a/roles/timemaster/vars/CentOS.yml b/roles/timemaster/vars/CentOS.yml
new file mode 100644
index 000000000..8fe0bd106
--- /dev/null
+++ b/roles/timemaster/vars/CentOS.yml
@@ -0,0 +1,7 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+path_timemaster_conf: "/etc/timemaster.conf"
+path_chrony_conf: "/etc/chrony.conf"
+service_name_chrony: "chronyd"
diff --git a/roles/timemaster/vars/Debian.yml b/roles/timemaster/vars/Debian.yml
new file mode 100644
index 000000000..06b1d8434
--- /dev/null
+++ b/roles/timemaster/vars/Debian.yml
@@ -0,0 +1,7 @@
+# Copyright (C) 2024 RTE
+# SPDX-License-Identifier: Apache-2.0
+
+---
+path_timemaster_conf: "/etc/linuxptp/timemaster.conf"
+path_chrony_conf: "/etc/chrony/chrony.conf"
+service_name_chrony: "chrony"
diff --git a/src/centos/seapath-config_ovs.service b/src/centos/seapath-config_ovs.service
deleted file mode 100644
index b5921255b..000000000
--- a/src/centos/seapath-config_ovs.service
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (C) 2024 Red Hat, Inc.
-# SPDX-License-Identifier: Apache-2.0
-
-[Unit]
-Description="Configure OVS bridges and ports"
-After=ovs-vswitchd.service
-Before=libvirtd.service
-Before=systemd-networkd.service
-
-[Service]
-Type=oneshot
-RemainAfterExit=true
-ExecStartPre=/usr/share/openvswitch/scripts/ovs-ctl start
-ExecStart=/usr/local/bin/setup_ovs.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/src/cukinia-tests b/src/cukinia-tests
deleted file mode 160000
index 9847883d2..000000000
--- a/src/cukinia-tests
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 9847883d21069e33770af625aebca3a704330570