Commit 7114a0c

Format code with black
1 parent e72a150 commit 7114a0c

23 files changed: 7459 additions & 2295 deletions
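
The change set is a mechanical formatting pass; no behavior is intended to change. The conventions most visible in the diffs below are black's defaults: strings normalized to double quotes, long call sites exploded to one argument per line with a trailing comma, and long chained expressions wrapped in parentheses. As a hedged illustration of the kind of rewrite involved (this snippet is not part of the commit, and black's default 88-column mode is assumed):

import black

# Pre-commit style, mirroring the control/__main__.py hunk below.
src = (
    "parser = argparse.ArgumentParser(prog='python3 -m control',\n"
    "                                 description='Manage NVMe gateways',\n"
    "                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n"
)

# black only parses and reprints the source, so unresolved names are fine here.
formatted = black.format_str(src, mode=black.Mode())
print(formatted)
# Expected shape: double-quoted strings and the call exploded with a trailing
# comma, matching the "+" lines of the hunk.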

control/__main__.py

Lines changed: 6 additions & 4 deletions
@@ -12,10 +12,12 @@
 from .config import GatewayConfig
 from .utils import GatewayLogger
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(prog="python3 -m control",
-                                     description="Manage NVMe gateways",
-                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        prog="python3 -m control",
+        description="Manage NVMe gateways",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
     parser.add_argument(
         "-c",
         "--config",

control/cephutils.py

Lines changed: 79 additions & 39 deletions
@@ -15,28 +15,31 @@
 import json
 from .utils import GatewayLogger
 
+
 class CephUtils:
-    """Miscellaneous functions which connect to Ceph
-    """
+    """Miscellaneous functions which connect to Ceph"""
 
     def __init__(self, config):
         self.logger = GatewayLogger(config).logger
-        self.ceph_conf = config.get_with_default("ceph", "config_file", "/etc/ceph/ceph.conf")
+        self.ceph_conf = config.get_with_default(
+            "ceph", "config_file", "/etc/ceph/ceph.conf"
+        )
         self.rados_id = config.get_with_default("ceph", "id", "")
         self.anagroup_list = []
         self.last_sent = time.time()
 
     def execute_ceph_monitor_command(self, cmd):
-        self.logger.debug(f"Execute monitor command: {cmd}")
-        with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
-            rply = cluster.mon_command(cmd, b'')
+        self.logger.debug(f"Execute monitor command: {cmd}")
+        with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
+            rply = cluster.mon_command(cmd, b"")
         self.logger.debug(f"Monitor reply: {rply}")
         return rply
+
     def get_gw_id_owner_ana_group(self, pool, group, anagrp):
-        str = '{' + f'"prefix":"nvme-gw show", "pool":"{pool}", "group":"{group}"' + '}'
+        str = "{" + f'"prefix":"nvme-gw show", "pool":"{pool}", "group":"{group}"' + "}"
         self.logger.debug(f"nvme-show string: {str}")
         rply = self.execute_ceph_monitor_command(str)
-        self.logger.debug(f"reply \"{rply}\"")
+        self.logger.debug(f'reply "{rply}"')
         conv_str = rply[1].decode()
         data = json.loads(conv_str)
 
@@ -45,31 +48,35 @@ def get_gw_id_owner_ana_group(self, pool, group, anagrp):
         comp_str = f"{anagrp}: ACTIVE"
         for gateway in data["Created Gateways:"]:
             if comp_str in gateway["ana states"]:
-                gw_id = gateway["gw-id"]
-                self.logger.debug(f"found gw owner of anagrp {anagrp}: gw {gw_id}")
-                break
+                gw_id = gateway["gw-id"]
+                self.logger.debug(f"found gw owner of anagrp {anagrp}: gw {gw_id}")
+                break
         return gw_id
 
     def get_number_created_gateways(self, pool, group):
         now = time.time()
-        if (now - self.last_sent) < 10 and self.anagroup_list :
-            self.logger.info(f"Caching response of the monitor: {self.anagroup_list}")
-            return self.anagroup_list
-        else :
+        if (now - self.last_sent) < 10 and self.anagroup_list:
+            self.logger.info(f"Caching response of the monitor: {self.anagroup_list}")
+            return self.anagroup_list
+        else:
             try:
                 self.anagroup_list = []
                 self.last_sent = now
-                str = '{' + f'"prefix":"nvme-gw show", "pool":"{pool}", "group":"{group}"' + '}'
+                str = (
+                    "{"
+                    + f'"prefix":"nvme-gw show", "pool":"{pool}", "group":"{group}"'
+                    + "}"
+                )
                 self.logger.debug(f"nvme-show string: {str}")
                 rply = self.execute_ceph_monitor_command(str)
-                self.logger.debug(f"reply \"{rply}\"")
+                self.logger.debug(f'reply "{rply}"')
                 conv_str = rply[1].decode()
                 pos = conv_str.find("[")
                 if pos != -1:
                     new_str = conv_str[pos + len("[") :]
-                    pos = new_str.find("]")
-                    new_str = new_str[: pos].strip()
-                    int_str_list = new_str.split(' ')
+                    pos = new_str.find("]")
+                    new_str = new_str[:pos].strip()
+                    int_str_list = new_str.split(" ")
                     self.logger.debug(f"new_str : {new_str}")
                     for x in int_str_list:
                         self.anagroup_list.append(int(x))
@@ -86,17 +93,27 @@ def get_number_created_gateways(self, pool, group):
     def fetch_and_display_ceph_version(self):
         try:
             rply = self.execute_ceph_monitor_command('{"prefix":"mon versions"}')
-            ceph_ver = rply[1].decode().removeprefix("{").strip().split(":")[0].removeprefix('"').removesuffix('"')
+            ceph_ver = (
+                rply[1]
+                .decode()
+                .removeprefix("{")
+                .strip()
+                .split(":")[0]
+                .removeprefix('"')
+                .removesuffix('"')
+            )
             ceph_ver = ceph_ver.removeprefix("ceph version ")
-            self.logger.info(f"Connected to Ceph with version \"{ceph_ver}\"")
+            self.logger.info(f'Connected to Ceph with version "{ceph_ver}"')
         except Exception:
             self.logger.exception(f"Failure fetching Ceph version:")
             pass
 
     def fetch_ceph_fsid(self) -> str:
         fsid = None
         try:
-            with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
+            with rados.Rados(
+                conffile=self.ceph_conf, rados_id=self.rados_id
+            ) as cluster:
                 fsid = cluster.get_fsid()
         except Exception:
             self.logger.exception(f"Failure fetching Ceph fsid:")
@@ -105,7 +122,9 @@ def fetch_ceph_fsid(self) -> str:
 
     def pool_exists(self, pool) -> bool:
         try:
-            with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
+            with rados.Rados(
+                conffile=self.ceph_conf, rados_id=self.rados_id
+            ) as cluster:
                 if cluster.pool_exists(pool):
                     return True
         except Exception:
@@ -116,8 +135,8 @@ def pool_exists(self, pool) -> bool:
 
     def service_daemon_register(self, cluster, metadata):
         try:
-            if cluster: # rados client
-                daemon_name = metadata['id']
+            if cluster:  # rados client
+                daemon_name = metadata["id"]
                 cluster.service_daemon_register("nvmeof", daemon_name, metadata)
                 self.logger.info(f"Registered {daemon_name} to service_map!")
         except Exception:
@@ -128,46 +147,62 @@ def service_daemon_update(self, cluster, status_buffer):
             if cluster and status_buffer:
                 cluster.service_daemon_update(status_buffer)
         except Exception:
-            self.logger.exception(f"Can't update daemon status to service_map!")
+            self.logger.exception(f"Can't update daemon status to service_map!")
 
     def create_image(self, pool_name, image_name, size) -> bool:
         # Check for pool existence in advance as we don't create it if it's not there
         if not self.pool_exists(pool_name):
-            raise rbd.ImageNotFound(f"Pool {pool_name} doesn't exist", errno = errno.ENODEV)
+            raise rbd.ImageNotFound(
+                f"Pool {pool_name} doesn't exist", errno=errno.ENODEV
+            )
 
         image_exists = False
         try:
             image_size = self.get_image_size(pool_name, image_name)
             image_exists = True
         except rbd.ImageNotFound:
-            self.logger.debug(f"Image {pool_name}/{image_name} doesn't exist, will create it using size {size}")
+            self.logger.debug(
+                f"Image {pool_name}/{image_name} doesn't exist, will create it using size {size}"
+            )
             pass
 
         if image_exists:
             if image_size != size:
-                raise rbd.ImageExists(f"Image {pool_name}/{image_name} already exists with a size of {image_size} bytes which differs from the requested size of {size} bytes",
-                                      errno = errno.EEXIST)
-            return False # Image exists with an idetical size, there is nothing to do here
+                raise rbd.ImageExists(
+                    f"Image {pool_name}/{image_name} already exists with a size of {image_size} bytes which differs from the requested size of {size} bytes",
+                    errno=errno.EEXIST,
+                )
+            return (
+                False  # Image exists with an idetical size, there is nothing to do here
+            )
 
         with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
             with cluster.open_ioctx(pool_name) as ioctx:
                 rbd_inst = rbd.RBD()
                 try:
                     rbd_inst.create(ioctx, image_name, size)
                 except rbd.ImageExists as ex:
-                    self.logger.exception(f"Image {pool_name}/{image_name} was created just now")
-                    raise rbd.ImageExists(f"Image {pool_name}/{image_name} was just created by someone else, please retry",
-                                          errno = errno.EAGAIN)
+                    self.logger.exception(
+                        f"Image {pool_name}/{image_name} was created just now"
+                    )
+                    raise rbd.ImageExists(
+                        f"Image {pool_name}/{image_name} was just created by someone else, please retry",
+                        errno=errno.EAGAIN,
+                    )
                 except Exception as ex:
-                    self.logger.exception(f"Can't create image {pool_name}/{image_name}")
+                    self.logger.exception(
+                        f"Can't create image {pool_name}/{image_name}"
+                    )
                     raise ex
 
         return True
 
     def get_image_size(self, pool_name, image_name) -> int:
         image_size = 0
         if not self.pool_exists(pool_name):
-            raise rbd.ImageNotFound(f"Pool {pool_name} doesn't exist", errno = errno.ENODEV)
+            raise rbd.ImageNotFound(
+                f"Pool {pool_name} doesn't exist", errno=errno.ENODEV
+            )
 
         with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
             with cluster.open_ioctx(pool_name) as ioctx:
@@ -176,9 +211,14 @@ def get_image_size(self, pool_name, image_name) -> int:
                     with rbd.Image(ioctx, image_name) as img:
                         image_size = img.size()
                 except rbd.ImageNotFound:
-                    raise rbd.ImageNotFound(f"Image {pool_name}/{image_name} doesn't exist", errno = errno.ENODEV)
+                    raise rbd.ImageNotFound(
+                        f"Image {pool_name}/{image_name} doesn't exist",
+                        errno=errno.ENODEV,
+                    )
                 except Exception as ex:
-                    self.logger.exception(f"Error while trying to get the size of image {pool_name}/{image_name}")
+                    self.logger.exception(
+                        f"Error while trying to get the size of image {pool_name}/{image_name}"
+                    )
                     raise ex
 
         return image_size
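
None of the CephUtils helpers above change signature; the commit only reflows them. For context, a hedged usage sketch of the class as touched here (the GatewayConfig construction, config path, pool name, and image name are assumptions for illustration, and a reachable Ceph cluster with the python rados/rbd bindings is assumed):

from control.config import GatewayConfig
from control.cephutils import CephUtils

config = GatewayConfig("ceph-nvmeof.conf")  # hypothetical config path
ceph = CephUtils(config)

ceph.fetch_and_display_ceph_version()  # logs: Connected to Ceph with version "..."
if ceph.pool_exists("rbd"):  # pool name assumed for illustration
    created = ceph.create_image("rbd", "demo-image", 1024 * 1024 * 1024)  # 1 GiB
    print(created, ceph.get_image_size("rbd", "demo-image"))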
