Skip to content

Commit

Permalink
fixed merge
Browse files Browse the repository at this point in the history
  • Loading branch information
Lorenzo Terenzi committed Oct 26, 2017
2 parents b25b60c + 1acad3c commit 20240c6
Show file tree
Hide file tree
Showing 10 changed files with 11 additions and 121 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@ dataset/train.csv
dataset/*
src/old_models/
config/
.idea/
12 changes: 0 additions & 12 deletions .idea/cs_433_ML_project_1.iml

This file was deleted.

7 changes: 0 additions & 7 deletions .idea/inspectionProfiles/profiles_settings.xml

This file was deleted.

68 changes: 0 additions & 68 deletions .idea/markdown-navigator.xml

This file was deleted.

3 changes: 0 additions & 3 deletions .idea/markdown-navigator/profiles_settings.xml

This file was deleted.

4 changes: 0 additions & 4 deletions .idea/misc.xml

This file was deleted.

8 changes: 0 additions & 8 deletions .idea/modules.xml

This file was deleted.

6 changes: 0 additions & 6 deletions .idea/vcs.xml

This file was deleted.

6 changes: 6 additions & 0 deletions src/ensemble_log_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,13 @@


class Config(object):
"""Configuration object holding the classifiers' hyperparameters:
batch size, number of epochs (passes over the dataset), learning rate
(the gradient step size), the regularization lambda, and mode, which
can be either 'cv' (cross-validation) or 'test'."""

def __init__(self, batch_size, num_epochs, learning_rate, lambda_, mode='cv'):
self.batch_size = batch_size
Expand Down
17 changes: 4 additions & 13 deletions src/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,18 +23,12 @@ def create_csv_submission(ids, y_pred, name):
for r1, r2 in zip(ids, y_pred):
writer.writerow({'Id': int(r1), 'Prediction': int(r2)})

def xavier_init(size):
    """Draw Xavier/Glorot-normal initial weights of the given shape.

    size: sequence of ints, e.g. (fan_in, fan_out); the Glorot-normal
    standard deviation is sqrt(2 / (fan_in + fan_out)).
    Returns an ndarray of shape `size` sampled from N(0, 2/sum(size)).

    Bug fix: the original multiplied the unit-normal samples by the
    VARIANCE 2/sum(size) instead of the standard deviation
    sqrt(2/sum(size)), shrinking the weights far below the intended scale.
    """
    std = np.sqrt(2.0 / np.sum(size))
    return std * np.random.randn(*size)

def adam(theta, m, v, beta_1, beta_2, learning_rate, gradient, iter_num):
    """Perform one Adam optimization step (Kingma & Ba, 2015).

    theta: current parameters; m, v: running first/second raw moment
    estimates (pass back the values returned by the previous call);
    beta_1, beta_2: exponential decay rates; iter_num: 1-based step
    counter used for bias correction.
    Returns (updated theta, new m, new v).

    Bug fix: the original stored the bias-CORRECTED moments back into
    m and v, so the 1/(1-beta**t) correction compounded every step.
    Standard Adam keeps the uncorrected moments across steps and applies
    the correction only to the values used in the parameter update.
    """
    m = beta_1 * m + (1 - beta_1) * gradient
    v = beta_2 * v + (1 - beta_2) * gradient ** 2
    # Bias-corrected estimates, used for this update only.
    m_hat = m / (1 - beta_1 ** iter_num)
    v_hat = v / (1 - beta_2 ** iter_num)
    return theta - learning_rate * m_hat / (v_hat ** 0.5 + 10 ** -8), m, v


def dataloader(mode='train', reduced=False):
"""Load the train or test dataset from dataset/<mode>.csv."""
# TODO: erase the reduced feature since it is not used
print("Loading data ...")
file_name = 'dataset/' + mode + '.csv'
with open(file_name) as f:
Expand Down Expand Up @@ -134,6 +128,3 @@ def build_polynomial(x):

base = np.hstack((bias[:, np.newaxis], np.log(abs(1+x)), x, base_mixed, x**2, x**3))
return base

if __name__ == '__main__':
pass

0 comments on commit 20240c6

Please sign in to comment.