robustness.py
# A set of functions related to the robustness of a graph, and to the
# robustness of the diffusion of messages in a graph.
import sys
from collections import defaultdict
from random import choice

import networkx as nx
import numpy as np
from scipy import stats


def computeRobustness(graph, tests=100, mode="simple",
                      purge="links", nodeList=None):
    """ Compute robustness with a percolation approach.

    Parameters
    ----------
    graph: nx graph
        the graph to analyze
    tests: int
        the number of tests to perform on each graph
    mode: str
        "simple" == remove random links/nodes
        "core" == remove only links/nodes in the core graph (no leaves)
    purge: str
        "links" == remove edges (bond percolation)
        "nodes" == remove nodes (site percolation)
    nodeList: list
        instead of simulating random failures, pass a list of nodes
        to be progressively removed (popped from the end of the list)

    Returns
    -------
    (dict, dict)
        a dict mapping "elements removed" to the average relative size
        of the largest connected component, and a dict mapping
        "elements removed" to the lists of average relative sizes of
        the remaining components
    """
    links = []
    weights = []
    # build a list of links that can be removed; in "core" mode skip
    # links that touch a leaf node (degree-1 endpoint)
    for u, v, data in graph.edges(data=True):
        if mode == "core" and (len(graph[u]) == 1 or len(graph[v]) == 1):
            continue
        links.append((u, v))
        weights.append(float(data.get("weight", 1.0)))
    if purge == "links":
        totWeight = sum(weights)
        # this will increase the probability of failure for links
        # that have a high weight; it is meaningful when the weight
        # represents the badness of the link
        normalizedWeight = [w / totWeight for w in weights]
        # normalizedWeight = [1.0 / len(links) for l in links]
        custDist = stats.rv_discrete(values=(list(range(len(links))),
                                             normalizedWeight),
                                     name="custDist")
    mainCSize = defaultdict(list)
    mainNonCSize = defaultdict(list)
    nlen = float(len(graph.nodes()))
    elen = float(len(graph.edges()))
    for i in range(tests):
        purgedGraph = graph.copy()
        purgedLinks = []
        purgedNodes = []
        nodeRank = list(nodeList) if nodeList else []
        if purge == "links":
            purgedItems = int(elen / 2)
        elif purge == "nodes":
            purgedItems = int(nlen / 2)
        for k in range(1, purgedItems):
            if purge == "links":
                # compare against the removable set, which in "core"
                # mode may be smaller than the total number of edges
                if len(purgedLinks) >= len(links):
                    print("Trying to purge", k, "links",
                          "from a graph with", len(links),
                          "removable links", file=sys.stderr)
                    break
                # draw a random link that we did not remove yet
                r = custDist.rvs()
                while r in purgedLinks:
                    r = custDist.rvs()
                purgedLinks.append(r)
                u, v = links[r]
                purgedGraph.remove_edge(u, v)
            elif purge == "nodes":
                if len(purgedNodes) >= nlen:
                    print("Trying to purge", k, "nodes",
                          "from a graph with", nlen, "total nodes",
                          file=sys.stderr)
                    break
                if not nodeRank:
                    # random failure: pick any remaining node
                    r = choice(list(purgedGraph.nodes()))
                else:
                    # targeted failure: take the next node in the ranking
                    r = nodeRank.pop()
                purgedNodes.append(r)
                purgedGraph.remove_node(r)
            # connected_components returns an iterator of node sets;
            # sort by size so the first entry is the giant component
            compList = sorted(nx.connected_components(purgedGraph),
                              key=len, reverse=True)
            mainCSize[k].append(len(compList[0]) / nlen)
            compSizes = [len(h) for h in compList[1:]]
            if not compSizes:
                mainNonCSize[k].append(0)
            else:
                mainNonCSize[k].append(np.average(compSizes) / nlen)
    mainCSizeAvg = {}
    # average the giant-component sizes over the independent tests
    # performed for each removal step
    for k, sizes in mainCSize.items():
        mainCSizeAvg[k] = np.average(sizes)
    return mainCSizeAvg, mainNonCSize
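

# A minimal usage sketch, not part of the original module: run both
# bond and site percolation on a small random graph and print how the
# giant component shrinks. The Erdos-Renyi parameters and the number
# of tests below are hypothetical choices for illustration only.
if __name__ == "__main__":
    g = nx.erdos_renyi_graph(50, 0.1)
    # bond percolation: remove random links (uniform, since the demo
    # graph carries no "weight" attribute)
    avgGiant, nonGiant = computeRobustness(g, tests=10, purge="links")
    for removed in sorted(avgGiant):
        print(removed, avgGiant[removed])
    # site percolation: remove random nodes instead of links
    avgGiantNodes, _ = computeRobustness(g, tests=10, purge="nodes")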