main_regression.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from BNN_MLP import BNN_MLP

# Number of posterior weight samples drawn at test time, and training iterations.
test_n_samples = 100
TRAINING_STEPS = 4000
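# A minimal sketch of the reparameterisation trick that a mean-field Bayesian
# layer of this kind typically uses: w = mu + softplus(rho) * eps, eps ~ N(0, 1),
# so gradients flow through mu and rho. This is illustrative only and is an
# assumption about BNN_MLP.py, whose actual implementation may differ; the
# helper below is never called by the script.
def _sample_gaussian_weights(mu, rho):
    """Draw one weight sample via the reparameterisation trick (illustrative)."""
    sigma = tf.nn.softplus(rho)           # map rho to a positive standard deviation
    eps = tf.random_normal(tf.shape(mu))  # parameter-free Gaussian noise
    return mu + sigma * eps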
# Build the training set: 20 noisy samples of y = x^3 on [-4, 4], plus a denser
# noise-free curve on [-6, 6] used as the test grid for plotting.
x = np.random.uniform(-4, 4, size=20).reshape((-1, 1))
noise = np.random.normal(0, 9, size=20).reshape((-1, 1))
y = x ** 3 + noise
x_ = np.linspace(-6, 6, 200)
y_ = x_ ** 3
# Initialize the Bayesian network, its variational objective and the optimizer.
net = BNN_MLP(n_inputs=1, n_outputs=1, hidden_units=[100], init_mu=0.0, init_rho=0.0,
              activation=tf.nn.relu, last_activation=tf.identity)
x_placeholder = tf.placeholder(tf.float32, (None, 1))
y_placeholder = tf.placeholder(tf.float32, (None, 1))
# The network returns the sampled predictions, a weight log-probability
# (complexity) term, and the negative log-likelihood of the targets computed
# with the supplied squared-error loss.
out, log_probs, nll = net(x_placeholder, targets=y_placeholder, sample=True, n_samples=1,
                          loss_function=lambda y, y_target: 0.5 * tf.reduce_sum(tf.square(y_target - y), 1))
loss = log_probs / 40 + nll
optim = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optim.minimize(loss)
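# For reference, the objective above has the usual variational free-energy form:
# loss = (complexity cost) / scale + (expected negative log-likelihood),
# i.e. roughly KL(q(w) || p(w)) / 40 + NLL(y | x, w). The divisor 40 is the
# scaling chosen in this script; a common alternative is to divide the
# complexity term by the number of minibatches per epoch.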
# Train on the full 20-point dataset at every step.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for i in range(TRAINING_STEPS):
    avg_loss, _ = sess.run([loss, train_op], feed_dict={x_placeholder: x, y_placeholder: y})
    if i % 100 == 0:
        print('Iteration ', i, "loss: ", avg_loss)
# Sample test_n_samples networks and evaluate each one over the test grid,
# to compute the average prediction and its standard deviation.
ys = []
for i in range(test_n_samples):
    ys.append(sess.run(out, feed_dict={x_placeholder: np.expand_dims(x_, -1)}))
ys = np.asarray(ys)

# Plot the predictive mean, a +/- 3 standard deviation band and the training data.
plt.plot(x_, np.mean(ys, 0), c='royalblue', label='mean pred')
plt.fill_between(x_, np.squeeze(np.mean(ys, 0) - 3 * np.std(ys, 0)),
                 np.squeeze(np.mean(ys, 0) + 3 * np.std(ys, 0)),
                 color='cornflowerblue', alpha=.5, label='+/- 3 std')
plt.plot(x, y, '*', color='black', label='training data')
plt.legend()
plt.tight_layout()
plt.savefig("regression.png")
plt.show()
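# Optional sanity check (an addition, not required by the demo): quantify how
# far the predictive mean is from the noise-free curve y_ = x_**3 built above.
rmse = np.sqrt(np.mean((np.squeeze(np.mean(ys, 0)) - y_) ** 2))
print("RMSE of predictive mean vs. noise-free curve:", rmse)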
"""
all_stds = []
for l in net.layers:
w_sigma = np.reshape( sess.run(l.w_sigma), [-1] ).tolist()
b_sigma = np.reshape( sess.run(l.b_sigma), [-1] ).tolist()
all_stds += w_sigma + b_sigma
plt.hist(all_stds, 100)
plt.show()
"""