diff --git a/05/History for 05_assignment_05.ipynb - 2haein_machine-learning-assignment.pdf b/05/History for 05_assignment_05.ipynb - 2haein_machine-learning-assignment.pdf
new file mode 100644
index 0000000..2ffab1f
Binary files /dev/null and b/05/History for 05_assignment_05.ipynb - 2haein_machine-learning-assignment.pdf differ
diff --git a/05/assignment_05.html b/05/assignment_05.html
new file mode 100644
index 0000000..cd882dc
--- /dev/null
+++ b/05/assignment_05.html
@@ -0,0 +1,15093 @@
+import numpy as np
+import matplotlib.image as img
+import matplotlib.pyplot as plt
+import matplotlib.colors as colors
+
filename = 'assignment_05_data.csv'
+data = np.loadtxt(filename, delimiter = ',')
+
+x = data[0, :] # independent variable
+y = data[1, :] # dependent variable
+
+x_sample = x[::10]
+y_sample = y[::10]
+
+plt.figure(figsize=(16,6))
+
+plt.subplot(121)
+plt.plot(x, y, '-', color = 'blue')
+plt.title('data points')
+
+plt.subplot(122)
+plt.plot(x, y, '-', color = 'blue')
+plt.plot(x_sample, y_sample, 'o', color = 'red')
+plt.title('samples of data points (every 10 points)')
+
+plt.tight_layout()
+plt.show()
+
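The subsample taken every 10 points above is not reused later in the assignment code; as a quick, purely illustrative sanity check it can be fitted with NumPy's np.polyfit (a stand-in, not part of the assignment) and compared against the full curve. A minimal sketch, assuming x, y, x_sample, and y_sample as defined above and an arbitrary degree of 3:

# Illustrative only: cubic fit on the subsample, evaluated on all x.
coeffs = np.polyfit(x_sample, y_sample, 3)   # coefficients, highest power first
y_check = np.polyval(coeffs, x)              # evaluate the fitted polynomial on all x

plt.figure(figsize=(8, 6))
plt.plot(x, y, '-', color='blue')
plt.plot(x, y_check, '--', color='green')
plt.title('cubic fit on the subsample (sanity check)')
plt.show()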
Hint: np.power may be useful here.
def construct_matrix_A(x, p):
+
+ n = len(x)
+ A = np.zeros([n, p])
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+ # complete the blanks
+ #
+ for i in range(p):
+     A[:, i] = np.power(x, i)  # column i holds x**i, so A is the n-by-p design matrix
+ #
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ return A
+
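For reference, NumPy's np.vander builds the same monomial design matrix directly. A minimal equivalence check (illustrative only; x_demo and p_demo are arbitrary values chosen for this sketch):

# np.vander with increasing=True gives columns x**0, x**1, ..., x**(p-1),
# which should match construct_matrix_A.
x_demo = np.array([1.0, 2.0, 3.0])
p_demo = 4
A_loop = construct_matrix_A(x_demo, p_demo)
A_vander = np.vander(x_demo, p_demo, increasing=True)
print(np.allclose(A_loop, A_vander))  # expected: True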
def construct_vector_b(y):
+
+ n = len(y)
+ b = np.zeros([n, 1])
+
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+ # complete the blanks
+ #
+ b = y.reshape(n, 1)  # target values as an n-by-1 column vector
+
+ #
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ return b
+
Hint: np.matmul, np.linalg.inv, and np.sum may be useful here.
def solve_regression(x, y, p):
+
+ z = np.zeros([p, 1])
+ loss = 0
+
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+ # complete the blanks
+ #
+ b = construct_vector_b(y)
+ A = construct_matrix_A(x, p)
+
+ # normal equations: z = (A^T A)^{-1} A^T b
+ coeff_matrix = np.matmul(A.T, A)
+ coeff_matrix_inv = np.linalg.inv(coeff_matrix)
+ z = np.matmul(np.matmul(coeff_matrix_inv, A.T), b)
+
+ n = len(x)
+
+ # loss: mean squared residual, halved
+ loss = np.sum(np.power(np.matmul(A, z) - b, 2)) / (2*n)
+ #
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ return z, loss
+
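Forming (A^T A)^{-1} explicitly can be numerically delicate when p is large and the columns of A are nearly collinear; np.linalg.lstsq solves the same least-squares problem more stably. A minimal cross-check sketch (illustrative only; p_demo is an arbitrary choice):

# The lstsq solution should agree with the normal-equations solution
# for well-conditioned problems.
p_demo = 4
A_demo = construct_matrix_A(x, p_demo)
b_demo = construct_vector_b(y)
z_normal, _ = solve_regression(x, y, p_demo)
z_lstsq, _, _, _ = np.linalg.lstsq(A_demo, b_demo, rcond=None)
print(np.allclose(z_normal, z_lstsq))  # expected: True (up to numerical error)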
Hint: np.matmul, np.linalg.inv, and np.sum may be useful here.
def solve_regression_with_regularization(x, y, p, alpha):
+
+ z = np.zeros([p, 1])
+ loss = 0
+
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+ # complete the blanks
+ #
+ n = len(x)
+ b = construct_vector_b(y)
+ A = construct_matrix_A(x, p)
+
+ # ridge closed form: z = (A^T A + n*alpha*I)^{-1} A^T b
+ coeff_matrix = np.matmul(A.T, A) + np.identity(p) * alpha * n
+ z = np.matmul(np.matmul(np.linalg.inv(coeff_matrix), A.T), b)
+
+ # loss: halved mean squared residual plus the L2 penalty on the coefficients
+ loss = np.sum(np.power(np.matmul(A, z) - b, 2)) / (2*n) + np.sum(np.power(z, 2)) * alpha / 2
+
+ #
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ return z, loss
+
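The closed form above minimizes (1/(2n))*||A z - b||^2 + (alpha/2)*||z||^2. As with the unregularized case, np.linalg.solve avoids forming the inverse explicitly; a minimal cross-check sketch (illustrative only; p_demo and alpha_demo are arbitrary choices):

# Same ridge solution via np.linalg.solve: solve (A^T A + n*alpha*I) z = A^T b.
p_demo, alpha_demo = 8, 0.1
A_demo = construct_matrix_A(x, p_demo)
b_demo = construct_vector_b(y)
n_demo = len(x)
lhs = np.matmul(A_demo.T, A_demo) + n_demo * alpha_demo * np.identity(p_demo)
rhs = np.matmul(A_demo.T, b_demo)
z_solve = np.linalg.solve(lhs, rhs)
z_inv, _ = solve_regression_with_regularization(x, y, p_demo, alpha_demo)
print(np.allclose(z_solve, z_inv))  # expected: True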
Hint: np.matmul may be useful here.
def approximate(x, y, p):
+
+ n = len(y)
+ y_hat = np.zeros([n, 1])
+ loss = 0
+
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+ # complete the blanks
+ #
+ A = construct_matrix_A(x, p)
+ (z, loss) = solve_regression(x, y, p)
+ y_hat = np.matmul(A, z)
+
+ #
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ return y_hat, loss
+
def approximate_with_regularization(x, y, p, alpha):
+
+ n = len(y)
+ y_hat = np.zeros([n, 1])
+ loss = 0
+
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+ # complete the blanks
+ #
+ A = construct_matrix_A(x, p)
+ (z, loss) = solve_regression_with_regularization(x, y, p, alpha)
+ y_hat = np.matmul(A, z)
+
+ #
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ return y_hat, loss
+
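To see how the training loss behaves as the polynomial degree grows, with and without the penalty, the two approximations can be swept over the same degrees used in the result cells below. A minimal sketch (illustrative only; the degree list and alpha = 0.1 mirror the values used later):

# The plain least-squares loss keeps shrinking as p grows, while the
# regularized loss stays larger because big coefficients are penalized.
for p_demo in [2, 4, 8, 16, 32]:
    _, loss_plain = approximate(x, y, p_demo)
    _, loss_ridge = approximate_with_regularization(x, y, p_demo, 0.1)
    print('p = {:2d}   loss = {:.6g}   regularized loss = {:.6g}'.format(p_demo, loss_plain, loss_ridge))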
def function_result_01():
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.title('data points')
+ plt.show()
+
def function_result_02():
+
+ p = 2
+ (y_hat, _) = approximate(x, y, p)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_03():
+
+ p = 4
+ (y_hat, _) = approximate(x, y, p)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_04():
+
+ p = 8
+ (y_hat, _) = approximate(x, y, p)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_05():
+
+ p = 16
+ (y_hat, _) = approximate(x, y, p)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_06():
+
+ p = 32
+ (y_hat, _) = approximate(x, y, p)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_07():
+
+ p = 2
+ alpha = 0.1
+ (y_hat, _) = approximate_with_regularization(x, y, p, alpha)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_08():
+
+ p = 4
+ alpha = 0.1
+ (y_hat, _) = approximate_with_regularization(x, y, p, alpha)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_09():
+
+ p = 8
+ alpha = 0.1
+ (y_hat, _) = approximate_with_regularization(x, y, p, alpha)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_10():
+
+ p = 16
+ alpha = 0.1
+ (y_hat, _) = approximate_with_regularization(x, y, p, alpha)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_11():
+
+ p = 32
+ alpha = 0.1
+ (y_hat, _) = approximate_with_regularization(x, y, p, alpha)
+
+ plt.figure(figsize=(8,6))
+ plt.plot(x, y, '-', color='blue')
+ plt.plot(x, y_hat, '-', color='red')
+ plt.show()
+
def function_result_12():
+
+ p = 4
+ (_, loss) = approximate(x, y, p)
+
+ print('loss = ', loss)
+
def function_result_13():
+
+ p = 16
+ (_, loss) = approximate(x, y, p)
+
+ print('loss = ', loss)
+
def function_result_14():
+
+ p = 4
+ alpha = 0.1
+ (_, loss) = approximate_with_regularization(x, y, p, alpha)
+
+ print('loss = ', loss)
+
def function_result_15():
+
+ p = 16
+ alpha = 0.1
+ (_, loss) = approximate_with_regularization(x, y, p, alpha)
+
+ print('loss = ', loss)
+
number_result = 15
+
+for i in range(number_result):
+ title = '## [RESULT {:02d}]'.format(i+1)
+ name_function = 'function_result_{:02d}()'.format(i+1)
+
+ print('**************************************************')
+ print(title)
+ print('**************************************************')
+ eval(name_function)
+
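eval works here because the function names are generated locally, but looking each function up by name avoids evaluating strings. A minimal sketch of an equivalent driver (illustrative only):

# Equivalent driver without eval: fetch each result function from the
# module globals by name and call it directly.
for i in range(number_result):
    name = 'function_result_{:02d}'.format(i + 1)
    print('**************************************************')
    print('## [RESULT {:02d}]'.format(i + 1))
    print('**************************************************')
    globals()[name]()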
**************************************************
## [RESULT 01] ... [RESULT 11]
**************************************************
(plot outputs, not reproduced here)
**************************************************
## [RESULT 12]
**************************************************
loss = 0.05269780686325942
**************************************************
## [RESULT 13]
**************************************************
loss = 1.208205906835679e-06
**************************************************
## [RESULT 14]
**************************************************
loss = 0.06379867384617091
**************************************************
## [RESULT 15]
**************************************************
loss = 0.00844492935699587