# config.yaml — hyperparameter settings for the Youshu, NetEase, and iFashion datasets.
# (Web-scrape residue — page chrome and rendered line numbers — removed; it made the file invalid YAML.)
Youshu:
  data_path: './datasets'
  batch_size_train: 2048  # the batch size for training
  batch_size_test: 2048  # the batch size for testing
  topk: [10, 20, 40, 80]  # the top-k cutoffs for the evaluation metrics
  neg_num: 1  # number of negatives used for BPR loss. All the experiments use 1.
  # search hyperparameters
  # the following are the best settings
  aug_type: "ED"  # graph augmentation type; options: ED, MD, OP
  ed_interval: 1  # by how many epochs to dropout edge, default is 1
  embedding_sizes: [64]  # the embedding size for user, bundle, and item
  # NOTE(review): key "num_layerss" (double 's') is misspelled but kept verbatim — the consumer reads this exact key.
  num_layerss: [1]  # number of layers for the information propagation over the item- and bundle-level graphs
  # the following dropout rates are with respect to the "aug_type", i.e., if aug_type is ED, the following dropout rates are for ED.
  item_level_ratios: [0.2]  # the dropout ratio for item-view graph
  bundle_level_ratios: [0.2]  # the dropout ratio for bundle-view graph
  bundle_agg_ratios: [0.2]  # the dropout ratio for bundle-item affiliation graph
  lrs: [1.0e-3]  # learning rate
  l2_regs: [1.0e-4]  # the l2 regularization weight: lambda_2
  bpr_lambdas: [0.5]  # the BPR loss weight (previous comment wrongly duplicated the contrastive lambda_1 description)
  c_lambdas1: [0.005]  # the contrastive loss weight: lambda_1
  c_lambdas2: [0.005]  # the contrastive loss weight: lambda_2
  c_lambdas3: [0.02]  # the contrastive loss weight: lambda_3
  c_temps: [0.25]  # the temperature in the contrastive loss: tau
  c_temps1: [0.1]  # secondary temperature — presumably for a second contrastive term; TODO confirm against the model code
  epochs: 100  # number of epochs to train
  test_interval: 5  # by how many epochs to run the validation and testing.
  ub_p_drop: 0.2  # user-bundle drop probability — assumption from the name; verify against the consumer
  mix_ratio: 0.8
  ssl_reg: 0.06
  # NOTE(review): the "sim_restruciton*" keys look like a misspelling of "reconstruction", but are kept
  # verbatim — the consumer reads these exact keys; renaming them here would silently break lookups.
  sim_restruciton: 0.28
  sim_restruciton_u: 0.26
  sim_weight: 0.95
  status: 0
NetEase:
  data_path: './datasets'
  batch_size_train: 2048  # the batch size for training
  batch_size_test: 2048  # the batch size for testing
  topk: [10, 20, 40, 80]  # the top-k cutoffs for the evaluation metrics
  neg_num: 1  # number of negatives used for BPR loss
  # the following are the best settings
  aug_type: "ED"  # graph augmentation type; options: ED, MD, OP
  ed_interval: 1  # by how many epochs to dropout edge
  embedding_sizes: [64]  # the embedding size for user, bundle, and item
  num_layerss: [1]  # number of propagation layers (key spelling kept verbatim for the consumer)
  lrs: [1.0e-3]  # learning rate
  item_level_ratios: [0.1]  # the dropout ratio for item-view graph
  bundle_level_ratios: [0.3]  # the dropout ratio for bundle-view graph
  bundle_agg_ratios: [0.1]  # the dropout ratio for bundle-item affiliation graph
  l2_regs: [1.0e-4]  # the l2 regularization weight
  c_lambdas1: [0.01]  # the contrastive loss weight: lambda_1
  c_lambdas2: [0.01]  # the contrastive loss weight: lambda_2
  c_lambdas3: [0.16]  # the contrastive loss weight: lambda_3
  c_temps: [0.25]  # the temperature in the contrastive loss: tau
  c_temps1: [0.1]
  epochs: 50  # number of epochs to train
  test_interval: 5  # by how many epochs to run the validation and testing
  ub_p_drop: 0.2
  mix_ratio: 0.8
  ssl_reg: 0.06
  # NOTE(review): "sim_restruciton*" key spellings kept verbatim — the consumer reads these exact keys.
  sim_restruciton: 0.26
  sim_restruciton_u: 0.44
  sim_weight: 0.95
  status: 0
iFashion:
  data_path: './datasets'
  batch_size_train: 2048  # the batch size for training
  batch_size_test: 2048  # the batch size for testing
  topk: [10, 20, 40, 80]  # the top-k cutoffs for the evaluation metrics
  neg_num: 1  # number of negatives used for BPR loss
  # the following are the best settings
  aug_type: "ED"  # graph augmentation type; options: ED, MD, OP
  ed_interval: 1  # by how many epochs to dropout edge
  embedding_sizes: [64]  # the embedding size for user, bundle, and item
  num_layerss: [1]  # number of propagation layers (key spelling kept verbatim for the consumer)
  lrs: [1.0e-3]  # learning rate
  item_level_ratios: [0.2]  # the dropout ratio for item-view graph
  bundle_level_ratios: [0.2]  # the dropout ratio for bundle-view graph
  bundle_agg_ratios: [0]  # the dropout ratio for bundle-item affiliation graph (0 disables it)
  l2_regs: [4.0e-5]  # the l2 regularization weight
  c_lambdas1: [0.6]  # the contrastive loss weight: lambda_1
  c_lambdas2: [0.02]  # the contrastive loss weight: lambda_2
  c_lambdas3: [0.4]  # the contrastive loss weight: lambda_3
  c_temps: [0.2]  # the temperature in the contrastive loss: tau
  c_temps1: [0.1]
  epochs: 50  # number of epochs to train
  test_interval: 5  # by how many epochs to run the validation and testing
  ub_p_drop: 0.2
  mix_ratio: 0.8
  ssl_reg: 0.06
  # NOTE(review): "sim_restruciton*" key spellings kept verbatim — the consumer reads these exact keys.
  sim_restruciton: 0.4
  sim_restruciton_u: 0.44
  sim_weight: 0.95
  status: 0