-
Notifications
You must be signed in to change notification settings - Fork 2
/
alpha_compare.py
156 lines (129 loc) · 5.43 KB
/
alpha_compare.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
# coding: utf-8
'''
Experiments to produce results in Figure 3b in amp.pdf, MIMO detection: α-BP without prior
The comparison between algorithms alpha-BP with different alpha value, mmse, MAP
'''
# package dependencies
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
from multiprocessing import Pool
import matplotlib
matplotlib.rcParams.update({'font.size': 18})
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import itertools
from collections import defaultdict
from joblib import Parallel, delayed
from scipy.stats import multivariate_normal
import sys
# import algorithms
sys.path.append("./src")
from modules import GaussianDiag, EP, MMSE, PowerEP, StochasticEP, ExpansionEP, ExpansionPowerEP, ExpectationConsistency, LoopyBP, LoopyMP, PPBP, AlphaBP, MMSEalphaBP, ML, VariationalBP, MMSEvarBP, EPalphaBP
from utils import channel_component, sampling_noise, sampling_signal, sampling_H,real2complex
# configuration of experiments
class hparam(object):
    """Experiment configuration: MIMO system sizes, the SNR grid, and the
    registry of detection algorithms compared in Figure 3b of amp.pdf."""
    # vector length of x = num_tx * 2, section 1 in amp.pdf
    num_tx = 4
    # vector length of y = num_rx * 2, section 1 in amp.pdf
    num_rx = 4
    # the finite set A (BPSK alphabet), section 1 in amp.pdf
    constellation = [int(-1), int(1)]
    # uniform prior over the constellation.
    # NOTE(review): attribute name keeps the original "soucrce" typo on
    # purpose — external helpers (e.g. sampling_signal) may read it by name.
    soucrce_prior = [0.5, 0.5]
    signal_var = 1
    # SNR grid: 10 points on [1, 40]
    snr = np.linspace(1, 40, 10)
    # number of Monte Carlo simulations per point in the experiment figure
    monte = 5
    power_n = 4. / 3
    # update damping parameter for the EC algorithm
    EC_beta = 0.2
    # per-run alpha value; set inside task() for each alpha-BP variant
    alpha = None
    ############## observe effect of alpha for the linear system ########
    # BUG FIX: the last two keys were labeled "0.8" and "1.2" while their
    # alpha values and legends were 0.6 and 0.8 — keys now match.
    algos = {"MMSE": {"detector": MMSE, "legend": "MMSE"},
             "ML": {"detector": ML, "legend": "MAP"},
             "LoopyBP": {"detector": LoopyBP, "legend": "BP"},
             "AlphaBP, 0.2": {"detector": AlphaBP, "alpha": 0.2, "legend": r'$\alpha$-BP, 0.2'},
             "AlphaBP, 0.4": {"detector": AlphaBP, "alpha": 0.4, "legend": r'$\alpha$-BP, 0.4'},
             "AlphaBP, 0.6": {"detector": AlphaBP, "alpha": 0.6, "legend": r'$\alpha$-BP, 0.6'},
             "AlphaBP, 0.8": {"detector": AlphaBP, "alpha": 0.8, "legend": r'$\alpha$-BP, 0.8'}
             }
    # per-algorithm iteration budgets (only entries matching an algo key
    # are ever consulted)
    iter_num = {"EP": 10,
                "EC": 50,
                "LoopyBP": 50,
                "PPBP": 50,
                "AlphaBP": 50,
                "MMSEalphaBP": 50,
                "VariationalBP": 50,
                "EPalphaBP": 50,
                "MMSEvarBP": 50,
                "LoopyMP": 50}
    # attach an (unused-here) SER accumulator to every algorithm entry
    for _, value in algos.items():
        value["ser"] = []
def task(snr):
    """Run hparam.monte Monte-Carlo trials at one SNR value.

    Args:
        snr: the signal-to-noise ratio for this experiment point (one
            element of hparam.snr; passed through to sampling_noise).

    Returns:
        dict with key "snr" (the input value) plus one entry per
        algorithm in hparam.algos mapping to its mean symbol error rate
        (error count averaged over trials, divided by hparam.num_tx).
    """
    # one error-count list per algorithm
    errors = {name: [] for name in hparam.algos}
    for _ in tqdm(range(hparam.monte)):
        x, true_symbol = sampling_signal(hparam)
        # noise variance is controlled by the SNR (in dB)
        noise, noise_var = sampling_noise(hparam=hparam, snr=snr)
        channel = sampling_H(hparam)
        noised_signal = np.dot(channel, x) + noise
        for key, method in hparam.algos.items():
            # BUG FIX: was `key is "MMSE" or key is "ML"` — identity
            # comparison on string literals is implementation-dependent
            # (SyntaxWarning on CPython >= 3.8); use equality instead.
            if key in ("MMSE", "ML"):
                #### closed-form / exhaustive detection
                detector = method["detector"](hparam)
                power_ratio = noise_var / hparam.signal_var
                estimated_symbol = detector.detect(y=noised_signal,
                                                   channel=channel,
                                                   power_ratio=power_ratio)
            else:
                # message-passing detectors; alpha variants read hparam.alpha
                if "Alpha" in key or "alpha" in key:
                    hparam.alpha = method['alpha']
                detector = method['detector'](noise_var, hparam)
                detector.fit(channel=channel,
                             noise_var=noise_var,
                             noised_signal=noised_signal,
                             stop_iter=50)
                estimated_symbol = detector.detect_signal_by_mean()
            est_complex_symbol = real2complex(estimated_symbol)
            errors[key].append(np.sum(true_symbol != est_complex_symbol))
    performance = {"snr": snr}
    for key in hparam.algos:
        performance[key] = np.mean(np.array(errors[key])) / hparam.num_tx
    return performance
# begin the experiment
if __name__ == '__main__':
    # Fan the per-SNR experiments out over all cores.
    # Dead code removed: an unused `collect_result` callback and a
    # `results = []` that was immediately overwritten by pool.map.
    with mp.Pool(mp.cpu_count()) as pool:
        results = pool.map(task, list(hparam.snr))

    # Regroup results: one SER list per algorithm, ordered by hparam.snr.
    # pool.map preserves input order, but matching on the echoed "snr"
    # value keeps this robust to any reordering.
    performance = defaultdict(list)
    for snr in hparam.snr:
        for the_result in results:
            if the_result["snr"] == snr:
                for key in hparam.algos:
                    performance[key].append(the_result[key])

    # SER-vs-SNR figure: one semilog curve per algorithm, distinct markers
    marker_list = ["o", "<", "+", ">", "v", "1", "2", "3", "8", "*", "h", "d", "D"]
    iter_marker_list = iter(marker_list)
    fig, ax = plt.subplots()
    for key, method in hparam.algos.items():
        ax.semilogy(hparam.snr, performance[key],
                    label=method['legend'],
                    marker=next(iter_marker_list))
    lgd = ax.legend(loc="best", fontsize='small', ncol=2)
    ax.set(xlabel="Ratio of Signal to Noise Variance", ylabel="SER")
    ax.grid()
    fig.savefig("figures/alpha_compare.pdf",
                bbox_extra_artists=(lgd,),
                bbox_inches='tight')
    # plt.show()