Skip to content

Commit

Permalink
Commit for fifty per cent
Browse files Browse the repository at this point in the history
  • Loading branch information
h-mayorquin committed Nov 10, 2017
1 parent 89ab75b commit 3263001
Show file tree
Hide file tree
Showing 8 changed files with 2,409 additions and 84 deletions.
2 changes: 1 addition & 1 deletion analysis_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ def calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences):
mean = 0
std = 0

return total_sequence_time, mean, std, success
return total_sequence_time, mean, std, success, timings


def calculate_compression_factor(manager, training_time, exclude_extrema=True, remove=0):
Expand Down
8 changes: 4 additions & 4 deletions network.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,8 @@ def update_continuous(self, dt=1.0, sigma=None):


class BCPNNFast:
def __init__(self, hypercolumns, minicolumns, beta=None, w=None, G=1.0, tau_m=0.050, g_w=1, g_w_ampa=1.0, g_beta=1,
tau_z_pre=0.150, tau_z_post=0.005, tau_z_pre_ampa=0.005, tau_z_post_ampa=0.005, tau_p=5.0, tau_k=0.010,
def __init__(self, hypercolumns, minicolumns, beta=None, w=None, G=1.0, tau_m=0.050, g_w=1.0, g_w_ampa=1.0, g_beta=1,
tau_z_pre=0.150, tau_z_post=0.005, tau_z_pre_ampa=0.005, tau_z_post_ampa=0.005, tau_p=10.0, tau_k=0.010,
tau_a=2.70, g_a=97.0, g_I=10.0, p=1.0, k=0.0, sigma=1.0, epsilon=1e-20, k_inner=False, prng=np.random):
# Initial values are taken from the paper on memory by Marklund and Lansner also from Phil's paper

Expand Down Expand Up @@ -205,7 +205,7 @@ def __init__(self, hypercolumns, minicolumns, beta=None, w=None, G=1.0, tau_m=0.
self.p = p

# State variables
self.o = np.zeros(self.n_units) * (1.0 / self.minicolumns)
self.o = np.ones(self.n_units) * (1.0 / self.minicolumns)
self.s = np.log(np.ones(self.n_units) * (1.0 / self.minicolumns))
self.beta = np.log(np.ones_like(self.o) * (1.0 / self.minicolumns))

Expand Down Expand Up @@ -248,7 +248,7 @@ def get_parameters(self):

def reset_values(self, keep_connectivity=True):
# State variables
self.o = np.zeros(self.n_units) * (1.0 / self.minicolumns)
self.o = np.ones(self.n_units) * (1.0 / self.minicolumns)
self.s = np.log(np.ones(self.n_units) * (1.0 / self.minicolumns))
self.beta = np.log(np.ones_like(self.o) * (1.0 / self.minicolumns))

Expand Down
67 changes: 38 additions & 29 deletions notebooks/2017-09-09(tau_p effects).ipynb

Large diffs are not rendered by default.

1,388 changes: 1,388 additions & 0 deletions notebooks/2017-11-07(Study of perfect weights recall).ipynb

Large diffs are not rendered by default.

940 changes: 940 additions & 0 deletions notebooks/2017-11-07(Test idea for scaling with only NMDA).ipynb

Large diffs are not rendered by default.

87 changes: 38 additions & 49 deletions play.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,32 +13,45 @@
from plotting_functions import plot_winning_pattern
from analysis_functions import calculate_timings, calculate_recall_success, calculate_recall_success_sequences
from analysis_functions import calculate_excitation_inhibition_ratio, get_excitation, get_inhibition, subsequence
from analysis_functions import calculate_total_connections
from plotting_functions import plot_weight_matrix
from analysis_functions import calculate_angle_from_history, calculate_winning_pattern_from_distances
from analysis_functions import calculate_patterns_timings

from analysis_functions import calculate_recall_success_sequences
from connectivity_functions import create_artificial_manager

def excitation_from_value(value, hypercolumns, minicolumns, n_patterns):
    """Return the pattern-averaged total excitation for a given weight value.

    The result mixes two contributions: the (n_patterns - 1) patterns that
    receive ``value * hypercolumns + value * (hypercolumns - 1)`` excitation,
    and one pattern that receives only ``value * (hypercolumns - 1)``.
    NOTE(review): presumably the second case models the first pattern of a
    sequence, which lacks a predecessor — confirm against the network model.

    Parameters
    ----------
    value : float
        Weight value of a single excitatory connection.
    hypercolumns : int
        Number of hypercolumns in the network.
    minicolumns : int
        Unused; kept so the call signature matches existing callers.
    n_patterns : int
        Number of stored patterns.

    Returns
    -------
    float
        The weighted average of the two excitation terms.
    """
    # Excitation for the (n_patterns - 1) "normal" patterns.
    excitation_normal = value * hypercolumns + value * (hypercolumns - 1)
    normal = ((n_patterns - 1.0) / n_patterns) * excitation_normal

    # Excitation for the single remaining pattern (original code had a
    # duplicated assignment here: `excitation_first = excitation_first = ...`).
    excitation_first = value * (hypercolumns - 1)
    first = (1.0 / n_patterns) * excitation_first

    return normal + first


# Patterns parameters
# Patterns parameters
hypercolumns = 4
minicolumns = 15
n_patterns = 15
minicolumns = 50
n_patterns = 50

# Manager properties
dt = 0.001
T_recalling = 5.0
values_to_save = []
values_to_save = ['o', 's', 'z_pre', 'z_post', 'p_pre', 'p_post', 'p_co', 'z_co', 'w']

# Protocol
training_time = 0.1
inter_sequence_interval = 1.0
inter_sequence_interval = 0.1
inter_pulse_interval = 0.0
epochs = 2

tau_z = 0.150
epochs = 1
tau_z_pre = 0.500

# Build the network
nn = BCPNNFast(hypercolumns, minicolumns, tau_z)
nn = BCPNNFast(hypercolumns, minicolumns, tau_z_pre=tau_z_pre)

# Build the manager
manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
Expand All @@ -50,53 +63,29 @@
inter_sequence_interval=inter_sequence_interval, epochs=epochs)

# Train
manager.run_network_protocol(protocol=protocol, verbose=True)

# Artificial matrix
beta = False
value = 1.0
inhibition = -0.3
extension = 3
decay_factor = 0.45
sequence_decay = 0.0
ampa = True
self_influence = False

sequences = [[i for i in range(n_patterns)]]
# epoch_history = manager.run_network_protocol(protocol=protocol, verbose=True)

manager_art = create_artificial_manager(hypercolumns, minicolumns, sequences, value, inhibition, extension, decay_factor,
sequence_decay, dt, BCPNNFast, NetworkManager, ampa, beta, beta_decay=False,
self_influence=self_influence)
z_pre = manager.history['z_pre']


cmap = 'coolwarm'
w = manager.nn.w
w = w[:nn.minicolumns, :nn.minicolumns]
aux_max = np.max(np.abs(w))
time = np.arange(0, training_time * n_patterns + inter_sequence_interval, dt)
time = np.arange(0, 10.0, 0.01)
y = np.exp(-time / 2.0)

fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(time, y)
aux = int(2*training_time / dt)
aux = 0
ax.fill_between(time, 0, y)

ax1 = fig.add_subplot(121)
im1 = ax1.imshow(w, cmap=cmap, interpolation='None', vmin=-aux_max, vmax=aux_max)
ax1.set_title('Training Procedure')
ax1.grid()

divider = make_axes_locatable(ax1)
cax1 = divider.append_axes('right', size='5%', pad=0.05)
ax1.get_figure().colorbar(im1, ax=ax1, cax=cax1)
fig.patch.set_visible(False)
ax.axis('off')
plt.show()
# Save the figure
# fname = './plots/filter.svg'
plt.savefig('test.svg')

w_art = manager_art.nn.w
w_art = w_art[:nn.minicolumns, :nn.minicolumns]
aux_max = np.max(np.abs(w_art))

ax2 = fig.add_subplot(122)

im2 = ax2.imshow(w_art, cmap=cmap, interpolation='None', vmin=-aux_max, vmax=aux_max)
ax2.set_title('Artificial Matrix')
ax2.grid()

divider = make_axes_locatable(ax2)
cax2 = divider.append_axes('right', size='5%', pad=0.05)
ax2.get_figure().colorbar(im2, ax=ax2, cax=cax2)

# Save
Empty file added plots/seminar_50_noise.py
Empty file.
1 change: 0 additions & 1 deletion plotting_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,6 @@ def plot_winning_pattern(manager, ax=None, separators=False, remove=0):
# Plot
with sns.axes_style("whitegrid", {'axes.grid': False}):
if ax is None:
sns.set_style("whitegrid", {'axes.grid': False})
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)

Expand Down

0 comments on commit 3263001

Please sign in to comment.