# config.yml
# path to the datasets folder
datasets_root: "/store/dabeaq/datasets/"
# path to the St-Charles dataset
sc_root: "/store/dabeaq/datasets/litiv/stcharles2018-v04/"
# path to the LITIV dataset
litiv_root: "/store/dabeaq/datasets/litiv/bilodeauIR/"
# path where the rectified images from the St-Charles dataset will be stored (the folder must be named "rectified")
rectified_sc_root: "/store/dabeaq/datasets/litiv/stcharles2018-v04/rectified/"
# path to the LITIV dataset with rectified images
rectified_litiv_root: "/store/dabeaq/datasets/litiv/bilodeauIR/Dataset/"
# path to the generated dataset (produced by dataset.py) containing the train, validation, and test folders
data_root: "../../"
# output directory for the dataset train/validation/test split
output_dataset: "/home/travail/dabeaq/litiv/masters/pbvs2019/cnn-rgbir/"
# path to the generated disparity files (.bin files produced by the patch_generator program)
disp_root: "/home/travail/dabeaq/litiv/stereo/patch_generator/"
# path to the pretrained disparity files
pretrain_disp_root: "/home/travail/dabeaq/litiv/masters/pbvs2019/cnn-rgbir/stereo/pretrain/disparity_locations/"
# path to the pretrained parameters folder (weights and batch-norm statistics)
pretrain_param_root: "/home/travail/dabeaq/litiv/masters/pbvs2019/cnn-rgbir/stereo/pretrain/parameters/"
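# NOTE (assumption): every path above ends with a trailing slash, which
# suggests downstream scripts append filenames directly; keep the trailing
# "/" when editing these values.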
# GPU id
gpu_id: 0
# half width (size of the small square patch)
half_width: 18
# half range (width of the larger patch)
half_range: 60
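# Presumably (assumption, based on typical siamese patch-matching setups),
# the small patch is (2 * half_width + 1) = 37 px wide and the disparity
# search spans (2 * half_range + 1) = 121 candidate positions.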
# batch size
bs: 64
# test batch size
tb: 100
# learning rate
learning_rate: 0.001
# learning rate decay
decay: 0.0000001
# weight decay
weight_decay: 0.0005
# momentum
momentum: 0.9
# epoch step
epoch_step: 40
# weight epoch
weight_epoch: 10
# max epoch
max_epoch: 200
# iterations per epoch
iter: 200
# number of validation points
validation_points: 10000
# optimization method
opt: 'adam'
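# A minimal sketch (assumption: PyYAML is available) of how a script such as
# dataset.py might read this file; the project's actual loading code may
# differ:
#
#   import yaml
#   with open("config.yml") as f:
#       cfg = yaml.safe_load(f)   # parse this file into a Python dict
#   fold = cfg["fold1"]           # select the cross-validation fold
#   print(fold["train"], fold["train_nb"], cfg["learning_rate"])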
# fold1 used in our experiments
fold1:
id: 1
weights: "weights_fold1.t7"
bn: "bn_fold1.t7"
train: "train_fold1.bin"
validation: "validation_fold1.bin"
test: "test_fold1.bin"
dataset: "dataset_fold1"
train_nb: 952
validation_nb: 60
test_nb: 106
# fold2 used in our experiments
fold2:
id: 2
weights: "weights_fold2.t7"
bn: "bn_fold2.t7"
train: "train_fold2.bin"
validation: "validation_fold2.bin"
test: "test_fold2.bin"
dataset: "dataset_fold2"
train_nb: 880
validation_nb: 60
test_nb: 178
# fold3 used in our experiments
fold3:
id: 3
weights: "weights_fold3.t7"
bn: "bn_fold3.t7"
train: "train_fold3.bin"
validation: "validation_fold3.bin"
test: "test_fold3.bin"
dataset: "dataset_fold3"
train_nb: 894
validation_nb: 90
test_nb: 134
# custom fold for training/testing on new data
custom:
id: 4
weights: ""
bn: ""
train: "train.bin"
validation: "validation.bin"
test: "test.bin"
dataset: "dataset"
train_nb: 0
validation_nb: 0
test_nb: 0
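# Hedged example of a filled-in `custom` section for new data; every value
# below is a hypothetical placeholder, not part of the original configuration:
#
#   custom:
#     id: 4
#     weights: "weights_custom.t7"   # written out after training
#     bn: "bn_custom.t7"             # batch-norm statistics saved alongside
#     train: "train.bin"
#     validation: "validation.bin"
#     test: "test.bin"
#     dataset: "dataset"
#     train_nb: 500                  # counts for your own split
#     validation_nb: 50
#     test_nb: 100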