-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathmmsb.py
137 lines (104 loc) · 3.29 KB
/
mmsb.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
import numpy as np
from scipy.special import psi
# Project-local helper module (not on PyPI); returns a preconfigured logger.
from formatted_logger import formatted_logger
# Module-level logger named 'MMSB' at 'info' level, used during inference.
log = formatted_logger('MMSB', 'info')
class MMSB:
    """
    Mixed-membership stochastic block model.

    Variational inference for the model of Airoldi et al.,
    "Mixed membership stochastic blockmodels", NIPS 2008.
    """

    def __init__(self, Y, K, alpha=1):
        """ follows the notations in the original NIPS paper
        :param Y: node by node (N x N) binary interaction matrix,
            row=sources, col=destinations
        :param K: number of mixtures (blocks)
        :param alpha: symmetric Dirichlet concentration parameter for the
            per-node membership priors
        """
        self.N = int(Y.shape[0]) # number of nodes
        self.K = K
        # Bug fix: `alpha` was previously ignored (the prior was always
        # np.ones(K)); scale the symmetric prior by the given concentration.
        self.alpha = np.ones(self.K) * alpha
        self.Y = Y
        self.optimize_rho = False
        self.max_iter = 100

        # variational parameters:
        #   phi[p, q, :] - membership responsibilities of node p for the
        #                  interaction directed at node q
        #   gamma[p, :]  - Dirichlet parameters of node p's membership posterior
        #   B[g, h]      - probability of a link from a node in block g to a
        #                  node in block h
        self.phi = np.random.dirichlet(self.alpha, size=(self.N, self.N))
        self.gamma = np.random.dirichlet([1] * self.K, size=self.N)
        self.B = np.random.random(size=(self.K, self.K))
        # 1 - density of matrix; float() avoids Python 2 integer division,
        # which previously made rho always equal 1.0 for integer Y.
        self.rho = 1. - float(np.sum(self.Y)) / (self.N * self.N)

    def variational_inference(self, converge_ll_fraction=1e-3):
        """ run variational inference for this model
        maximize the evidence variational lower bound
        :param converge_ll_fraction: intended fractional-change stopping
            threshold for the lower bound. Currently unused: the lower bound
            is not computed (run_e_step/run_m_step return 0), so the loop
            always runs for max_iter iterations.
        """
        converge = False
        old_ll = 0.
        iteration = 0

        while not converge:
            ll = self.run_e_step()
            ll += self.run_m_step()
            iteration += 1

            # Convergence on the bound is disabled until the bound is computed.
            # if (old_ll - ll) / ll < converge_ll_fraction or iteration >= self.max_iter:
            if iteration >= self.max_iter:
                converge = True

            log.info('iteration %d, lower bound %.2f' % (iteration, ll))

    def run_e_step(self):
        """ compute variational expectations (coordinate ascent on phi, gamma)

        For each ordered pair (p, q): update phi[p, q] and phi[q, p] in turn
        (each depends on the other's current value), then refresh the Dirichlet
        parameters gamma of both endpoints.

        :return: lower-bound contribution (currently always 0; not computed yet)
        """
        ll = 0.

        for p in range(self.N):
            for q in range(self.N):
                # update phi[p, q]: exp(E_q[log theta_{p,g}]) times the
                # Bernoulli likelihood term, geometrically weighted by phi[q, p]
                new_phi = np.zeros(self.K)
                for g in range(self.K):
                    new_phi[g] = np.exp(psi(self.gamma[p, g]) - psi(np.sum(self.gamma[p, :]))) \
                        * np.prod(((self.B[g, :] ** self.Y[p, q])
                                   * ((1. - self.B[g, :]) ** (1. - self.Y[p, q])))
                                  ** self.phi[q, p, :])
                self.phi[p, q, :] = new_phi / np.sum(new_phi)

                # symmetric update for phi[q, p], using the fresh phi[p, q]
                new_phi = np.zeros(self.K)
                for h in range(self.K):
                    new_phi[h] = np.exp(psi(self.gamma[q, h]) - psi(np.sum(self.gamma[q, :]))) \
                        * np.prod(((self.B[:, h] ** self.Y[p, q])
                                   * ((1. - self.B[:, h]) ** (1. - self.Y[p, q])))
                                  ** self.phi[p, q, :])
                self.phi[q, p, :] = new_phi / np.sum(new_phi)

                # gamma[p, k] = alpha_k + sum of responsibilities in which
                # node p participates, as source or destination
                for k in range(self.K):
                    self.gamma[p, k] = self.alpha[k] + np.sum(self.phi[p, :, k]) + np.sum(self.phi[:, p, k])
                    self.gamma[q, k] = self.alpha[k] + np.sum(self.phi[q, :, k]) + np.sum(self.phi[:, q, k])

        return ll

    def run_m_step(self):
        """ maximize the hyper parameters (alpha, B, and optionally rho)

        :return: lower-bound contribution (currently always 0; not computed yet)
        """
        ll = 0.
        self.optimize_alpha()
        self.optimize_B()
        if self.optimize_rho:
            self.update_rho()
        return ll

    def optimize_alpha(self):
        # Not implemented: the Dirichlet prior alpha is kept fixed.
        return

    def optimize_B(self):
        """ M-step update of the block interaction matrix:

            B[g,h] = sum_{p,q} Y[p,q] * phi[p,q,g] * phi[q,p,h]
                     / sum_{p,q} phi[p,q,g] * phi[q,p,h]

        Vectorized with einsum; the original quadruple Python loop computed
        the same two sums. Entries with a zero denominator are set to 0
        instead of the NaN the original division produced.
        """
        numer = np.einsum('pq,pqg,qph->gh', self.Y, self.phi, self.phi)
        denom = np.einsum('pqg,qph->gh', self.phi, self.phi)
        self.B = np.divide(numer, denom, out=np.zeros_like(numer), where=denom > 0)
        return

    def update_rho(self):
        # Not implemented: the sparsity parameter rho is kept fixed.
        return
def main():
    """ smoke test with a two-community interaction matrix:
    1 1 0 0 0
    1 1 0 0 0
    0 0 1 1 1
    0 0 1 1 1
    0 0 1 1 1
    """
    Y = np.array([[1, 1, 0, 0, 0],
                  [1, 1, 0, 0, 0],
                  [0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 1]])
    model = MMSB(Y, 2)
    model.variational_inference()
    # Single-argument print() calls are valid in both Python 2 and 3;
    # the original `print model.gamma` statement was Python-2-only syntax.
    print(model.gamma)
    print(model.B)


if __name__ == '__main__':
    main()