
Commit

Smaller GNN gives much more stable training
weihua916 committed Mar 14, 2021
1 parent d45f707 commit ead99f4
Showing 4 changed files with 14 additions and 8 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -24,3 +24,4 @@ dataset/
 **.json
 **/checkpoint
 **/events.out.*
+**/test*
8 changes: 4 additions & 4 deletions examples/lsc/pcqm4m/README.md
@@ -46,10 +46,10 @@ python main_mlpfp.py --log_dir $LOG_DIR --checkpoint_dir $CHECKPOINT_DIR --save_

 | Model |Valid MAE | Test MAE* | \#Parameters | Hardware |
 |:------------------ |:-------------- |:---------------| --------------:|----------|
-| GIN | 0.1484 | 0.1621 | 11.6M | GeForce RTX 2080 (11GB GPU) |
-| GIN-virtual | 0.1382 | 0.1465 | 21.4M | GeForce RTX 2080 (11GB GPU) |
-| GCN | 0.1588 | 0.1710 | 5.9M | GeForce RTX 2080 (11GB GPU) |
-| GCN-virtual | 0.1459 | 0.1594 | 15.7M | GeForce RTX 2080 (11GB GPU) |
+| GIN | 0.1536 | 0.1678 | 3.8M | GeForce RTX 2080 (11GB GPU) |
+| GIN-virtual | 0.1396 | 0.1487 | 6.7M | GeForce RTX 2080 (11GB GPU) |
+| GCN | 0.1684 | 0.1838 | 2.0M | GeForce RTX 2080 (11GB GPU) |
+| GCN-virtual | 0.1510 | 0.1579 | 4.9M | GeForce RTX 2080 (11GB GPU) |
 | MLP+Fingerprint | 0.2044 | 0.2068 | 16.1M | GeForce RTX 2080 (11GB GPU) |

\* Test MAE is evaluated on the **hidden test set.**
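The \#Parameters column above can be spot-checked once a model is instantiated; below is a minimal sketch, assuming only that the model is a standard torch.nn.Module (toy_model is a placeholder, not the GNN defined in this repository).

    import torch.nn as nn

    def count_parameters(model: nn.Module) -> int:
        # Sum the element counts of all trainable tensors.
        return sum(p.numel() for p in model.parameters() if p.requires_grad)

    # Placeholder module; substitute the GNN built by main_gnn.py.
    toy_model = nn.Sequential(nn.Linear(600, 600), nn.ReLU(), nn.Linear(600, 1))
    print(f'{count_parameters(toy_model) / 1e6:.1f}M parameters')
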
11 changes: 7 additions & 4 deletions examples/lsc/pcqm4m/main_gnn.py
@@ -12,6 +12,8 @@
 import argparse
 import time
 import numpy as np
+import random
+

 ### importing OGB-LSC
 from ogb.lsc import PygPCQM4MDataset, PCQM4MEvaluator
@@ -84,10 +84,10 @@ def main():
                         help='graph pooling strategy mean or sum (default: sum)')
     parser.add_argument('--drop_ratio', type=float, default=0,
                         help='dropout ratio (default: 0)')
-    parser.add_argument('--num_layers', type=int, default=7,
-                        help='number of GNN message passing layers (default: 7)')
-    parser.add_argument('--emb_dim', type=int, default=900,
-                        help='dimensionality of hidden units in GNNs (default: 900)')
+    parser.add_argument('--num_layers', type=int, default=5,
+                        help='number of GNN message passing layers (default: 5)')
+    parser.add_argument('--emb_dim', type=int, default=600,
+                        help='dimensionality of hidden units in GNNs (default: 600)')
     parser.add_argument('--train_subset', action='store_true')
     parser.add_argument('--batch_size', type=int, default=256,
                         help='input batch size for training (default: 256)')
@@ -106,6 +106,7 @@
     np.random.seed(42)
     torch.manual_seed(42)
     torch.cuda.manual_seed(42)
+    random.seed(42)

     device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

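The main_gnn.py change above shrinks the default architecture from 7 message-passing layers with 900-dimensional hidden units to 5 layers with 600-dimensional units. A minimal sketch of just these two flags is shown below, assuming the same argparse conventions as main_gnn.py (parse_gnn_args is an illustrative name, not a function in the script); the previous, larger configuration remains reachable by passing the flags explicitly.

    import argparse

    def parse_gnn_args():
        parser = argparse.ArgumentParser()
        # Smaller defaults introduced by this commit.
        parser.add_argument('--num_layers', type=int, default=5,
                            help='number of GNN message passing layers (default: 5)')
        parser.add_argument('--emb_dim', type=int, default=600,
                            help='dimensionality of hidden units in GNNs (default: 600)')
        return parser.parse_args()

    # e.g. the old configuration: python main_gnn.py --num_layers 7 --emb_dim 900
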
2 changes: 2 additions & 0 deletions examples/lsc/pcqm4m/main_mlpfp.py
@@ -11,6 +11,7 @@
 import argparse
 import time
 import numpy as np
+import random

 from rdkit import Chem
 from rdkit.Chem import AllChem
@@ -147,6 +148,7 @@ def main_mlp():
     np.random.seed(42)
     torch.manual_seed(42)
     torch.cuda.manual_seed(42)
+    random.seed(42)

     device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

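Both training scripts now seed Python's built-in random module in addition to NumPy and PyTorch, so every RNG the code touches starts from the same state. A minimal sketch of an equivalent all-in-one helper is below (set_seed is an illustrative name, not a function in either script).

    import random
    import numpy as np
    import torch

    def set_seed(seed: int = 42) -> None:
        # Seed every RNG used by the training scripts.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    set_seed(42)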
