{
  "name": "PICK_Default",
  "run_id": "test",
  "distributed": true,
  "local_world_size": 4,
  "local_rank": -1,
  "model_arch": {
    "type": "PICKModel",
    "args": {
      "embedding_kwargs": {
        "num_embeddings": -1,
        "embedding_dim": 512
      },
      "encoder_kwargs": {
        "char_embedding_dim": -1,
        "out_dim": 512,
        "nheaders": 4,
        "nlayers": 3,
        "feedforward_dim": 1024,
        "dropout": 0.1,
        "image_encoder": "resnet50",
        "roi_pooling_mode": "roi_align",
        "roi_pooling_size": [7, 7]
      },
      "graph_kwargs": {
        "in_dim": -1,
        "out_dim": -1,
        "eta": 1,
        "gamma": 1,
        "learning_dim": 128,
        "num_layers": 2
      },
      "decoder_kwargs": {
        "bilstm_kwargs": {
          "input_size": -1,
          "hidden_size": 512,
          "num_layers": 2,
          "dropout": 0.1,
          "bidirectional": true,
          "batch_first": true
        },
        "mlp_kwargs": {
          "in_dim": -1,
          "out_dim": -1,
          "dropout": 0.1
        },
        "crf_kwargs": {
          "num_tags": -1
        }
      }
    }
  },
  "train_dataset": {
    "type": "PICKDataset",
    "args": {
      "files_name": "/home/Wen/data/code/PICK/PICK-pytorch/data/data_examples_root/train_samples_list.csv",
      "boxes_and_transcripts_folder": "boxes_and_transcripts",
      "images_folder": "images",
      "entities_folder": "entities",
      "iob_tagging_type": "box_and_within_box_level",
      "resized_image_size": [480, 960],
      "ignore_error": false
    }
  },
  "validation_dataset": {
    "type": "PICKDataset",
    "args": {
      "files_name": "/home/Wen/data/code/PICK/PICK-pytorch/data/data_examples_root/train_samples_list.csv",
      "boxes_and_transcripts_folder": "boxes_and_transcripts",
      "images_folder": "images",
      "entities_folder": "entities",
      "iob_tagging_type": "box_and_within_box_level",
      "resized_image_size": [480, 960],
      "ignore_error": false
    }
  },
  "train_data_loader": {
    "type": "DataLoader",
    "args": {
      "batch_size": 4,
      "shuffle": true,
      "drop_last": true,
      "num_workers": 8,
      "pin_memory": true
    }
  },
  "val_data_loader": {
    "type": "DataLoader",
    "args": {
      "batch_size": 4,
      "shuffle": false,
      "drop_last": false,
      "num_workers": 8,
      "pin_memory": true
    }
  },
  "optimizer": {
    "type": "Adam",
    "args": {
      "lr": 0.0001,
      "weight_decay": 0,
      "amsgrad": true
    }
  },
  "lr_scheduler": {
    "type": "StepLR",
    "args": {
      "step_size": 30,
      "gamma": 0.1
    }
  },
  "trainer": {
    "epochs": 100,
    "gl_loss_lambda": 0.01,
    "log_step_interval": 10,
    "val_step_interval": 50,
    "save_dir": "saved/",
    "save_period": 20,
    "log_verbosity": 2,
    "monitor": "max overall-mEF",
    "monitor_open": true,
    "early_stop": 40,
    "anomaly_detection": false,
    "tensorboard": false,
    "sync_batch_norm": true
  }
}