-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathvarying_loopy.py
131 lines (103 loc) · 3.94 KB
/
varying_loopy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
# coding: utf-8
'''
Experiments of alpha-BP on markov random field with graphs generated by ER model.
This is to produce results in Figure 3a in amp.pdf, Mismatch between MAP and α-BP on (b) MIMO detection: α-BP without prior binary MRF.
'''
import numpy as np
import multiprocessing as mp
from multiprocessing import Pool
import matplotlib
import matplotlib.pyplot as plt
import itertools
from collections import defaultdict
from joblib import Parallel, delayed
from scipy.stats import multivariate_normal
import sys
# load the algorithms
sys.path.append("./src")
from loopy_modules import LoopyBP, AlphaBP, ML
from utils import channel_component, sampling_noise, sampling_signal, sampling_H,real2complex, ERsampling_S
# configuration of the experiments
class hparam(object):
    """Static configuration for the ER-graph binary-MRF alpha-BP experiment.

    All attributes are class-level and read directly as ``hparam.<attr>``;
    ``task`` additionally overwrites ``alpha`` before each detector run, so
    this class doubles as shared mutable state for the workers.
    """
    # vector length of x = num_tx, section 1 in amp.pdf
    num_tx = 16
    # vector length of y = num_rx, section 1 in amp.pdf
    num_rx = 16
    # binary symbol alphabet of the MRF variables
    constellation = [int(-1), int(1)]
    # uniform prior over the two symbols
    # NOTE(review): attribute name keeps the original "soucrce" typo on
    # purpose — external code (e.g. utils/loopy_modules) may read
    # hparam.soucrce_prior; renaming it here would silently break them.
    soucrce_prior = [0.5, 0.5]
    signal_var = 1
    # ER-graph edge probabilities swept on the x-axis of the figure
    connect_prob = np.linspace(0.0, 0.9, 10)
    # number of monte carlo simulations per point in the experiment figure
    monte = 2000
    stn_var = 1
    # scratch slot: task() stores the current detector's alpha here so the
    # detector constructors can read it back off the class
    alpha = None
    # detectors under comparison; "alpha": None selects plain loopy BP
    algos = {"LoopyBP": {"detector": LoopyBP, "alpha": None},
             "AlphaBP, 0.2": {"detector": AlphaBP, "alpha": 0.2},
             "AlphaBP, 0.4": {"detector": AlphaBP, "alpha": 0.4},
             "AlphaBP, 0.6": {"detector": AlphaBP, "alpha": 0.6},
             "AlphaBP, 0.8": {"detector": AlphaBP, "alpha": 0.8},
             "AlphaBP, 1.2": {"detector": AlphaBP, "alpha": 1.2}
             }
    # BP message-passing iterations per fit
    iter_num = 50
    # attach a symbol-error-rate accumulator to every algorithm entry
    # (unused by task(), which builds its own per-call accumulators)
    for _, value in algos.items():
        value["ser"] = []
def task(erp):
    '''
    Run the full Monte-Carlo sweep for a single ER connection probability.

    For each of ``hparam.monte`` trials: sample an MRF instance, compute the
    exhaustive ML (MAP) reference, then score every configured BP detector
    against it. Returns a dict mapping ``"erp"`` to the input probability and
    each algorithm name to its agreement rate with ML (1 - mismatch ratio).
    '''
    # per-algorithm lists of per-trial mismatch counts vs. the ML solution
    error_counts = {name: [] for name in hparam.algos}

    for _ in range(hparam.monte):
        # draw a random ER-graph exponential-family instance
        S, b = ERsampling_S(hparam, erp)
        # exact joint ML detection as the ground-truth reference
        reference = ML(hparam).detect(S, b)

        for name, cfg in hparam.algos.items():
            # detectors read the alpha back off the shared hparam class
            hparam.alpha = cfg["alpha"]
            detector = cfg["detector"](None, hparam)
            detector.fit(S=S, b=b, stop_iter=hparam.iter_num)
            estimate = detector.detect_signal_by_mean()
            mismatches = np.sum(np.array(reference) != np.array(estimate))
            error_counts[name].append(mismatches)

    # performance should be made by comparing with ML
    performance = {"erp": erp}
    for name in hparam.algos:
        performance[name] = 1 - np.mean(error_counts[name]) / hparam.num_tx
    return performance
# begin the experiment
if __name__ == '__main__':
    # Fan the connect_prob sweep out over one worker per CPU core.
    # Fixes vs. original: the redundant warm-up call
    # task(hparam.connect_prob[0]) (a full throwaway Monte-Carlo run), the
    # unused collect_result/results apply_async leftovers, and the missing
    # pool.join() are all removed; the context manager closes and joins.
    with mp.Pool(mp.cpu_count()) as pool:
        results = pool.map(task, list(hparam.connect_prob))

    # Reassemble per-algorithm curves in connect_prob order by matching each
    # result dict on its "erp" key (robust even if result order changed).
    performance = defaultdict(list)
    for connect_prob in hparam.connect_prob:
        for the_result in results:
            if the_result["erp"] == connect_prob:
                for key in hparam.algos:
                    performance[key].append(the_result[key])

    # One distinct marker per algorithm curve.
    marker_list = ["o", "<", "+", ">", "v", "1", "2", "3", "8"]
    iter_marker_list = iter(marker_list)
    fig, ax = plt.subplots()
    for key in hparam.algos:
        ax.plot(hparam.connect_prob, performance[key],
                label=key,
                marker=next(iter_marker_list))
    ax.legend()
    # NOTE(review): task() returns 1 - mismatch-ratio, so the y-axis label
    # "SER" matches the original figure but the plotted value is an
    # agreement rate — confirm against the paper's Figure 3a convention.
    ax.set(xlabel="ERP", ylabel="SER")
    ax.grid()
    fig.savefig("figures/erp_experiment.pdf")
    plt.show()