Incorrect number of hosts down reported
tendrl-bug-id: Tendrl#1076
bugzilla: 1687333

Signed-off-by: GowthamShanmugasundaram <gshanmug@redhat.com>
GowthamShanmugam committed Apr 27, 2019
1 parent e30ac2c commit 3748794
Showing 2 changed files with 60 additions and 42 deletions.
94 changes: 53 additions & 41 deletions tendrl/commons/objects/node_context/__init__.py
@@ -93,6 +93,11 @@ def render(self):
     def save(self, update=True, ttl=None):
         super(NodeContext, self).save(update)
         status = self.value + "/status"
+        if self.status == "UP" and ttl is None:
+            # Always set a ttl when the node status is UP
+            ttl = int(
+                NS.config.data.get("sync_interval", 60)
+            )
         if ttl:
             self._ttl = ttl
         try:
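
For context, the ttl here drives a heartbeat: the node's status key is rewritten with a ttl on every sync, and a node is treated as DOWN when the key expires in etcd. A minimal sketch of that pattern (not Tendrl's actual flow; client endpoint and key layout are assumed for illustration):

import time

import etcd

client = etcd.Client(host="127.0.0.1", port=2379)  # assumed endpoint
sync_interval = 60  # mirrors the "sync_interval" config default above

while True:
    # Refresh the status key before the previous ttl expires. If this
    # process dies, etcd expires the key, the watcher fires with a
    # value of None, and on_change() treats the node as DOWN.
    client.write("/nodes/node-1/status", "UP", ttl=sync_interval)
    time.sleep(sync_interval / 2)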
@@ -110,54 +115,61 @@ def on_change(self, attr, prev_value, current_value):
                 node_id=self.node_id,
                 integration_id=_tc.integration_id
             ).load()
-            if current_value is None and str(_cnc.is_managed).lower() == "yes":
+            if current_value is None and _tc.integration_id:
                 self.status = "DOWN"
                 self.save()
-                msg = "Node {0} is DOWN".format(self.fqdn)
-                event_utils.emit_event(
-                    "node_status",
-                    self.status,
-                    msg,
-                    "node_{0}".format(self.fqdn),
-                    "WARNING",
-                    node_id=self.node_id,
-                    integration_id=_tc.integration_id
-                )
-                # Load cluster_node_context will load node_context
-                # and it will be updated with latest values
-                _cnc_new = \
-                    NS.tendrl.objects.ClusterNodeContext(
-                        node_id=self.node_id,
-                        integration_id=_tc.integration_id,
-                        first_sync_done=_cnc.first_sync_done,
-                        is_managed=_cnc.is_managed
-                    )
-                _cnc_new.save()
-                del _cnc_new
-                # Update cluster details
-                self.update_cluster_details(_tc.integration_id)
-                _tag = "provisioner/%s" % _tc.integration_id
-                if _tag in self.tags:
-                    _index_key = "/indexes/tags/%s" % _tag
-                    self.tags.remove(_tag)
-                    self.save()
-                    etcd_utils.delete(_index_key)
-                if _tc.sds_name in ["gluster", "RHGS"]:
-                    bricks = etcd_utils.read(
-                        "clusters/{0}/Bricks/all/{1}".format(
-                            _tc.integration_id,
-                            self.fqdn
-                        )
-                    )
-
-                    for brick in bricks.leaves:
-                        try:
-                            etcd_utils.write(
-                                "{0}/status".format(brick.key),
-                                "Stopped"
-                            )
-                        except (etcd.EtcdAlreadyExist, etcd.EtcdKeyNotFound):
-                            pass
+                if str(_cnc.is_managed).lower() == "yes":
+                    msg = "Node {0} is DOWN".format(self.fqdn)
+                    event_utils.emit_event(
+                        "node_status",
+                        self.status,
+                        msg,
+                        "node_{0}".format(self.fqdn),
+                        "WARNING",
+                        node_id=self.node_id,
+                        integration_id=_tc.integration_id
+                    )
+                    # Load cluster_node_context will load node_context
+                    # and it will be updated with latest values
+                    _cnc_new = \
+                        NS.tendrl.objects.ClusterNodeContext(
+                            node_id=self.node_id,
+                            integration_id=_tc.integration_id,
+                            first_sync_done=_cnc.first_sync_done,
+                            is_managed=_cnc.is_managed
+                        )
+                    _cnc_new.save()
+                    del _cnc_new
+                    # Update cluster details
+                    self.update_cluster_details(_tc.integration_id)
+                    _tag = "provisioner/%s" % _tc.integration_id
+                    if _tag in self.tags:
+                        _index_key = "/indexes/tags/%s" % _tag
+                        self.tags.remove(_tag)
+                        self.save()
+                        try:
+                            etcd_utils.delete(_index_key)
+                        except etcd.EtcdKeyNotFound:
+                            pass
+                    if _tc.sds_name in ["gluster", "RHGS"]:
+                        bricks = etcd_utils.read(
+                            "clusters/{0}/Bricks/all/{1}".format(
+                                _tc.integration_id,
+                                self.fqdn
+                            )
+                        )
+
+                        for brick in bricks.leaves:
+                            try:
+                                etcd_utils.write(
+                                    "{0}/status".format(brick.key),
+                                    "Stopped"
+                                )
+                            except (
+                                etcd.EtcdAlreadyExist,
+                                etcd.EtcdKeyNotFound
+                            ):
+                                pass
             elif current_value == "UP" and str(
                     _cnc.is_managed).lower() == "yes":
                 msg = "{0} is UP".format(self.fqdn)
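
This hunk is what corrects the reported count: before, a node in an unmanaged cluster never had its status saved as "DOWN" at all, so it was invisible to any "hosts down" tally; now the status is persisted whenever the node belongs to a cluster, while event emission and cluster-detail updates stay gated on is_managed. A hypothetical illustration (not Tendrl code; client endpoint and key layout are assumed) of a consumer that only sees nodes whose status was actually written:

import etcd

client = etcd.Client(host="127.0.0.1", port=2379)  # assumed endpoint

def count_hosts_down(integration_id):
    # Counts nodes whose status key holds "DOWN". A node that is never
    # saved with status "DOWN" (the old unmanaged-cluster path) is
    # simply missing from this sum, which is how the count went wrong.
    nodes = client.read(
        "/clusters/{0}/nodes".format(integration_id),
        recursive=True
    )
    return sum(
        1 for leaf in nodes.leaves
        if leaf.key.endswith("/status") and leaf.value == "DOWN"
    )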
8 changes: 7 additions & 1 deletion tendrl/commons/utils/central_store/utils.py
@@ -107,7 +107,13 @@ def watch(obj, key):
                 attr = key.rstrip("/").split("/")[-1]
                 obj.on_change(attr, prev_val,
                               cur_val)
-        except etcd.EtcdKeyNotFound:
+        except Exception as ex:
+            # etcd only keeps the responses of the most recent 1000
+            # events across all etcd keys, so we may receive a 401
+            # EventIndexCleared error.
+            if isinstance(ex, etcd.EtcdEventIndexCleared):
+                continue
+            # When the watch crashes, clear the key from the watchers
             NS._int.watchers.pop(key, None)
             return
