Skip to content

Commit

Permalink
Fixed but at what cost
Browse files Browse the repository at this point in the history
  • Loading branch information
Parvfect committed Mar 13, 2024
1 parent e73dea3 commit 88c7ff1
Show file tree
Hide file tree
Showing 2 changed files with 27 additions and 56 deletions.
53 changes: 12 additions & 41 deletions distracted_coupon_collector.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
import re
from cProfile import Profile
from coupon_collector import get_parameters, get_parameters_sc_ldpc
from tanner_qspa import TannerQSPA

def choose_symbols(n_motifs, picks):
"""Creates Symbol Array as a combination of Motifs
Expand Down Expand Up @@ -63,7 +64,8 @@ def get_parameters(n_motifs, n_picks, dv, dc, k, n, ffdim, zero_codeword=False,

symbol_keys = np.arange(0, ffdim)

graph = VariableTannerGraph(dv, dc, k, n, ffdim=ffdim)
#graph = VariableTannerGraph(dv, dc, k, n, ffdim=ffdim)
graph = TannerQSPA(dv, dc, k, n, ffdim=ffdim)

if Harr is None:
Harr = r.get_H_arr(dv, dc, k, n)
Expand Down Expand Up @@ -110,7 +112,8 @@ def get_parameters_sc_ldpc(n_motifs, n_picks, L, M, dv, dc, k, n, ffdim, zero_co
else:
dv, dc = get_dv_dc(dv, dc, k, n, Harr)

graph = VariableTannerGraph(dv, dc, k, n, ffdim=ffdim)
#graph = VariableTannerGraph(dv, dc, k, n, ffdim=ffdim)
graph = TannerQSPA(dv, dc, k, n, ffdim=ffdim)
graph.establish_connections(Harr)

if H is None and G is None:
Expand Down Expand Up @@ -193,39 +196,7 @@ def simulate_reads(C, symbols, read_length, P, n_motifs, n_picks):

return likelihood_arr

def simulate(Harr, GFH, GFK, symbols, P, n_code, k, read_length=10, max_iter=10, cc=False):

ffdim = 67
n_motifs, n_picks = 8, 4
dv, dc = 3, 9

m_checks = GFH.shape[0]

GF = galois.GF(ffdim)

input_arr = [random.randint(0,66) for i in range(k)]
C = np.matmul(GF(input_arr), GFK)
#print(f"Codeword is \n{C}")

symbol_likelihoods_arr = np.array(simulate_reads(C, symbols, read_length, P, n_motifs, n_picks))

assert symbol_likelihoods_arr.shape == (n_code, ffdim)

if cc:
graph = VariableTannerGraph(dv, dc,k, n_code, ffdim=ffdim)
graph.establish_connections(Harr)
graph.assign_values(symbol_likelihoods_arr)
#z = graph.coupon_collector_decoding()
z = graph.qspa_decoding(GFH, GF)
else:
decoder = QSPADecoder(n_code, m_checks, GF, GFH)
# Will have to replace that max Iter with the break condition that we had before
z = decoder.decode(symbol_likelihoods_arr)

return np.array_equal(C, z)


def decoding_errors_fer(k, n, dv, dc, P, H, G, GF, graph, C, symbols, n_motifs, n_picks, decoder=None, decoding_failures_parameter=5, max_iterations=10, iterations=50, uncoded=False, bec_decoder=False, label=None, code_class="", read_lengths=np.arange(1,20)):
def decoding_errors_fer(k, n, dv, dc, P, H, G, GF, graph, C, symbols, n_motifs, n_picks, decoder=None, decoding_failures_parameter=10, max_iterations=50, iterations=50, uncoded=False, bec_decoder=False, label=None, code_class="", read_lengths=np.arange(1,20)):

frame_error_rate = []
max_iterations = max_iterations
Expand All @@ -237,7 +208,7 @@ def decoding_errors_fer(k, n, dv, dc, P, H, G, GF, graph, C, symbols, n_motifs,
symbol_likelihoods_arr = np.array(simulate_reads(C, symbols, i, P, n_motifs, n_picks))

if not decoder:
z = graph.qspa_decoding(symbol_likelihoods_arr, H, GF)
z = graph.qspa_decode(symbol_likelihoods_arr, H, GF)
else:
z = decoder.decode(symbol_likelihoods_arr, max_iter=20)

Expand Down Expand Up @@ -298,14 +269,14 @@ def run_fer(n_motifs, n_picks, dv, dc, k, n, L, M, ffdim, P, code_class="", iter
with Profile() as prof:
n_motifs, n_picks = 8, 4
dv, dc, ffdim, P = 3, 9, 67, 2 * 0.038860387943791645
k, n = 10, 15
L, M = 20, 36
k, n = 180, 270
L, M = 12, 36
read_length = 6
read_lengths = np.arange(12, 13)
read_lengths = np.arange(7, 13)


#run_fer(n_motifs, n_picks, dv, dc, k, n, L, M, ffdim, P, code_class="", uncoded=False, zero_codeword=True, bec_decoder=False, graph_decoding=False, read_lengths=read_lengths)
run_fer(n_motifs, n_picks, dv, dc, k, n, L, M, ffdim, P, code_class="", uncoded=False, zero_codeword=True, bec_decoder=False, graph_decoding=True, read_lengths=read_lengths)
run_fer(n_motifs, n_picks, dv, dc, k, n, L, M, ffdim, P, code_class="", uncoded=False, zero_codeword=False, bec_decoder=False, graph_decoding=False, read_lengths=read_lengths)
run_fer(n_motifs, n_picks, dv, dc, k, n, L, M, ffdim, P, code_class="", uncoded=False, zero_codeword=False, bec_decoder=False, graph_decoding=True, read_lengths=read_lengths)
(
Stats(prof)
.strip_dirs()
Expand Down
30 changes: 15 additions & 15 deletions tanner_qspa.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,9 @@ def initialize_cn_links(self):
for cn in self.cns:
cn_index = cn.identifier
for vn_index in cn.links:
self.cn_links[(cn_index, vn_index)] = 0
self.cn_links[(cn_index, vn_index)] = np.zeros(67)

def decode(self, symbol_likelihood_arr, H, GF, max_iterations=50):
def qspa_decode(self, symbol_likelihood_arr, H, GF, max_iterations=5):
"""Decodes using QSPA """

self.GF = GF
Expand All @@ -40,23 +40,23 @@ def decode(self, symbol_likelihood_arr, H, GF, max_iterations=50):
# Initializing the CN Links
self.initialize_cn_links()

prev_max_prob_codeword = self.get_max_prob_codeword(self.P, GF)
prev_max_prob_codeword = self.get_max_prob_codeword(symbol_likelihood_arr, GF)

iterations = 0

#for i in range(max_iterations):
while(True):

self.cn_update_qspa(copy_links)
self.cn_update()

max_prob_codeword = self.get_max_prob_codeword(self.P, GF)
max_prob_codeword = self.get_max_prob_codeword(symbol_likelihood_arr, GF)

parity = not np.matmul(H, max_prob_codeword).any()
if parity:
print("Decoding converges")
return max_prob_codeword

self.vn_update_qspa()
self.vn_update(symbol_likelihood_arr)

if np.array_equal(max_prob_codeword, prev_max_prob_codeword) or iterations > max_iterations:
break
Expand Down Expand Up @@ -92,7 +92,7 @@ def get_max_prob_codeword(self, P, GF):
for cn in vn.links:

# Update Symbol Probability as product of the CN Message
probs[a] *= self.get_cn_link_weight(cn, vn_index)[a]
probs[a] *= self.cn_links[(cn, vn_index)][a]

# Most likely symbol is the Symbol with the highest probability
z[vn_index] = np.argmax(probs)
Expand All @@ -101,31 +101,31 @@ def get_max_prob_codeword(self, P, GF):



def cn_update_qspa(self):
def cn_update(self):
""" CN Update for the QSPA Decoder. For each CN, performs convolutions for individual VN's as per the remaining links and updates the individual link values after finishing each link. Repeats for all the CN's """

# Iterate through all the CNs
for cn in self.cns:

cn_index = cn.identifier

vns = cn.links

# Iterating through all the VN Links of the Check node
for vn in vns:

# Getting all the remaining VNS
conv_indices = [idx for idx in vns if idx != vn]

# Getting convolution of all the vns
pdf = conv_circ(self.get_vn_link_weight(cn_index, conv_indices[0]), self.get_vn_link_weight(cn_index, conv_indices[1]))
pdf = conv_circ(self.vn_links[(cn_index, conv_indices[0])], self.vn_links[(cn_index, conv_indices[1])])

for indice in conv_indices[2:]:
pdf = conv_circ(pdf, self.get_vn_link_weight(cn_index, indice))
#new_pdfs.append(pdf[self.idx_shuffle])
pdf = conv_circ(pdf, self.vn_links[(cn_index, indice)])

# Updating the CN Link weight with the conv value
self.cn_links[(cn_index, vn)] = pdf[self.idx_shuffle]

def vn_update_qspa(self):
def vn_update(self, P):
""" Updates the CN as per the QSPA Decoding. Conditional Probability of a Symbol being favoured yadayada """

# Use the CN links to update the VN links by taking the favoured probabilities
Expand All @@ -139,7 +139,7 @@ def vn_update_qspa(self):

for cn in vn.links:

self.vn_links[(cn, vn_index)][a] = self.P[vn_index][a]
self.vn_links[(cn, vn_index)][a] = P[vn_index][a]

for t in vn.links:

Expand All @@ -148,7 +148,7 @@ def vn_update_qspa(self):

self.vn_links[(cn, vn_index)][a] *= self.cn_links[(t, vn_index)][a]

sum_copy_links = np.einsum('i->', self.vn_links[(cn, vn_index)]) # Seems to be twice as fast or smth
sum_copy_links = np.einsum('i->', self.vn_links[(cn, vn_index)])
self.vn_links[(cn, vn_index)] = self.vn_links[(cn, vn_index)]/sum_copy_links


Expand Down

0 comments on commit 88c7ff1

Please sign in to comment.