# config.yaml
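# Paradigm, evaluation, network and dataloader settings for an EEG decoding
# benchmark. The consuming framework is not named in the file itself; the key
# names and the 4-class, sensorimotor-channel setup suggest a MOABB/braindecode
# style motor-imagery pipeline (an assumption, not stated in the config).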
paradigm_params:
  base:
    events: null
    tmin: 0.0
    tmax: null
    baseline: null
    resample: 128
    n_classes: 4
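    # channels: fixed 44-electrode subset over sensorimotor cortex (FC/C/CP rows
    # plus intermediate 10-5 positions); presumably used to reduce every dataset
    # to a common montage.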
    channels:
      - FC5
      - FC1
      - FC2
      - FC6
      - C3
      - C4
      - CP5
      - CP1
      - CP2
      - CP6
      - FC3
      - FCz
      - FC4
      - C5
      - C1
      - C2
      - C6
      - CP3
      - CPz
      - CP4
      - FFC5h
      - FFC3h
      - FFC4h
      - FFC6h
      - FCC5h
      - FCC3h
      - FCC4h
      - FCC6h
      - CCP5h
      - CCP3h
      - CCP4h
      - CCP6h
      - CPP5h
      - CPP3h
      - CPP4h
      - CPP6h
      - FFC1h
      - FFC2h
      - FCC1h
      - FCC2h
      - CCP1h
      - CCP2h
      - CPP1h
      - CPP2h
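  # single_band: one broadband filter (values in Hz); presumably applied when the
  # pipeline operates on a single frequency band rather than the filter bank below.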
  single_band:
    fmin: 0.5
    fmax: 40.0
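  # filter_bank: nine 4 Hz-wide sub-bands covering 4-40 Hz, presumably for
  # filter-bank pipelines (e.g. FBCSP-style feature extraction).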
  filter_bank:
    filters:
      - [4, 8]
      - [8, 12]
      - [12, 16]
      - [16, 20]
      - [20, 24]
      - [24, 28]
      - [28, 32]
      - [32, 36]
      - [36, 40]
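# evaluation_params: settings for the evaluation procedure; within_session
# presumably maps to a within-session cross-validation scheme, with the
# commented block kept as an alternative learning-curve configuration.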
evaluation_params:
  base:
    random_state: 12
  within_session:
    n_perms: null
    data_size: null
    # data_size:
    #   policy: per_class
    #   value: [1, 2, 4, 8, 16, 32, 64, 96]
    # n_perms: [50, 34, 23, 15, 10, 7, 5, 5]  # When the training data is sparse, perform more permutations than when we have a lot of data
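# net_params: hyperparameters of the neural-network classifier; the module keys
# (F1, D, F2, kernel_length, drop_prob, pool_mode) resemble braindecode's EEGNet
# arguments, but the exact model class is not named in this file.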
net_params:
  # random_state: 12
  max_epochs: 1500
  lr: 0.001  # this is actually the maximal learning rate of the OneCycle LR scheduler
  module:
    max_lr: 0.1
    pool_mode: mean
    F1: 8
    D: 2
    F2: 16
    kernel_length: 64
    third_kernel_size: (8, 4)
    drop_prob: 0.5
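# dataloader_params: keyword arguments for the training data loader
# (presumably forwarded to the underlying PyTorch DataLoader).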
dataloader_params:
  batch_size: 50
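
A minimal sketch of how a config like this can be read, assuming PyYAML is available. The file name and the printed values come from this config; the loading code itself is illustrative and not part of the repository:

import yaml

# Load the benchmark configuration (the path is an assumption; adjust as needed).
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

base = cfg["paradigm_params"]["base"]
print(base["resample"])                    # 128
print(len(base["channels"]))               # 44 electrodes
print(cfg["net_params"]["module"]["F1"])   # 8

# Note: YAML has no tuple literal, so third_kernel_size loads as the string
# "(8, 4)" and would need parsing (e.g. ast.literal_eval) before being passed
# to a model constructor.
print(cfg["net_params"]["module"]["third_kernel_size"])  # "(8, 4)"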