#!/bin/bash
set -e

#############
# VARIABLES #
#############
mkdir -p /var/lib/ceph/tmp/
tmp_dir="$(mktemp --directory --tmpdir=/var/lib/ceph/tmp/)"
DOCKER_ENV=""
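
# NOTE: this script does not stand alone; it is expected to be sourced
# alongside the ceph-container common helpers, which provide the functions
# used below (log, ami_privileged, is_dmcrypt, dev_part, get_part_uuid,
# resolve_symlink, mount_lockbox, open_encrypted_part, close_encrypted_part,
# umount_lockbox).
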
#############
# FUNCTIONS #
#############
function mandatory_checks () {
  ami_privileged
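  # ':' with "${VAR:=none}" is the bash idiom for "assign a default if the
  # variable is unset or empty", so the test below catches a missing OSD_DEVICE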
: "${OSD_DEVICE:=none}"
if [[ ${OSD_DEVICE} == "none" ]]; then
log "ERROR: you must submit OSD_DEVICE, e.g: -e OSD_DEVICE=/dev/sda"
exit 1
fi
}

function mount_ceph_data () {
  if is_dmcrypt; then
    mount /dev/mapper/"${data_uuid}" "$tmp_dir"
  else
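    # look up the partition on ${OSD_DEVICE}* whose GPT partition label is
    # "ceph data" and mount it via its stable /dev/disk/by-partuuid path;
    # illustrative only, values are hypothetical:
    #   $ blkid -t PARTLABEL="ceph data" -s PARTUUID -o value /dev/sda1
    #   89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be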
    mount /dev/disk/by-partuuid/"$(blkid -t PARTLABEL="ceph data" -s PARTUUID -o value "${OSD_DEVICE}"*)" "$tmp_dir"
  fi
}

function umount_ceph_data () {
  umount "$tmp_dir"
}

function get_docker_env () {
  mount_ceph_data
  cd "$tmp_dir" || return
  if [[ -n ${1} ]]; then
    if [[ "${1}" == "whoami" ]]; then
      if is_dmcrypt; then
        cd /var/lib/ceph/osd-lockbox/"$data_uuid" || return
      fi
    fi
    if [[ -L ${1} ]]; then
      resolve_symlink "${1}"
    elif [[ -f ${1} ]]; then
      cat "${1}"
    fi
  else
    osd_type=$(<type)
    if [[ "$osd_type" == "filestore" ]]; then
      if [[ -L journal_dmcrypt ]]; then
        journal_part=$(resolve_symlink journal_dmcrypt)
        DOCKER_ENV="$DOCKER_ENV -e OSD_JOURNAL=$journal_part"
      elif [[ -L journal ]]; then
        journal_part=$(resolve_symlink journal)
        DOCKER_ENV="$DOCKER_ENV -e OSD_JOURNAL=$journal_part"
      fi
    # NOTE(leseb):
    # For bluestore we return the full device, not the partition
    # because apply_ceph_ownership_to_disks will determine the partitions
    # We could probably make this easier...
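    # ${var%?} strips the trailing character, turning a partition back into
    # its parent device, e.g. /dev/sdb2 -> /dev/sdb (this assumes a
    # single-digit partition number on a non-NVMe device name)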
elif [[ "$osd_type" == "bluestore" ]]; then
if [[ -L block.db_dmcrypt ]]; then
block_db_part=$(resolve_symlink block.db_dmcrypt)
DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_DB=${block_db_part%?}"
elif [[ -L block.db ]]; then
block_db_part=$(resolve_symlink block.db)
DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_DB=${block_db_part%?}"
fi
if [[ -L block.wal_dmcrypt ]]; then
block_wal_part=$(resolve_symlink block.wal_dmcrypt)
DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_WAL=${block_wal_part%?}"
elif [[ -L block.wal ]]; then
block_wal_part=$(resolve_symlink block.wal)
DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_WAL=${block_wal_part%?}"
fi
else
log "ERROR: unrecognized OSD type: $osd_type"
fi
echo "$DOCKER_ENV"
fi
cd || return
umount_ceph_data
}
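
# With no argument, get_docker_env echoes extra 'docker run' flags, e.g.
# " -e OSD_JOURNAL=/dev/sdb2" (illustrative value); with an argument such as
# "journal" or "whoami" it echoes the resolved path or file content instead.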

function start_disk_list () {
  mandatory_checks
  if is_dmcrypt; then
    # create the /dev/mapper/<uuid> devices for dmcrypt;
    # after a reboot they are usually not created automatically
    udevadm trigger
    # recent versions of ceph-disk (since bluestore) put the lockbox on
    # partition 5, whereas clusters already deployed with an earlier version
    # of ceph-disk have it on partition 3
    local lock_partition_num=5
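    # dev_part (from the common helpers) is assumed to map a disk and a
    # partition number to the partition device, e.g. (/dev/sda, 5) -> /dev/sda5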
    if [ ! -b "$(dev_part "${OSD_DEVICE}" $lock_partition_num)" ]; then
      lock_partition_num=3
    fi
    lockbox_uuid=$(get_part_uuid "$(dev_part "${OSD_DEVICE}" $lock_partition_num)")
    data_uuid=$(get_part_uuid "$(dev_part "${OSD_DEVICE}" 1)")
    data_part=$(dev_part "${OSD_DEVICE}" 1)
    mount_lockbox "$data_uuid" "$lockbox_uuid" 1> /dev/null
    if [[ ! -e /dev/mapper/"${data_uuid}" ]]; then
      open_encrypted_part "${data_uuid}" "${data_part}" "${data_uuid}" 1> /dev/null
    fi
    if [[ -n "$DISK_LIST_SEARCH" ]]; then
      get_docker_env "$DISK_LIST_SEARCH"
    else
      get_docker_env
    fi
    close_encrypted_part "${data_uuid}" "${data_part}" "${data_uuid}" 1> /dev/null
    umount_lockbox "$lockbox_uuid" 1> /dev/null
  else
    # this path is taken when we are called from osd_activate to look up a
    # specific device: pass the 'type' we are looking for via DISK_LIST_SEARCH
    # and get the matching partition back,
    # e.g.: DISK_LIST_SEARCH=journal returns something like /dev/sda2
    if [[ -n $DISK_LIST_SEARCH ]]; then
      get_docker_env "$DISK_LIST_SEARCH"
    else
      get_docker_env
    fi
  fi
}
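
# Typical use (a sketch; assumes this file and the common helpers are sourced
# by the ceph-container OSD entrypoint, and all device paths are illustrative):
#   OSD_DEVICE=/dev/sdb start_disk_list
#     -> echoes extra docker env flags such as " -e OSD_JOURNAL=/dev/sdb2"
#   DISK_LIST_SEARCH=journal OSD_DEVICE=/dev/sdb start_disk_list
#     -> echoes the journal partition, e.g. /dev/sdb2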