Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
dragen1860 committed Apr 30, 2019
1 parent 7a45f17 commit 753e563
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 23 deletions.
11 changes: 9 additions & 2 deletions README.MD
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
# Graph Convolution Network for TF2
# Graph Convolution Network for PyTorch

PyTorch 1.0 with Python 3.7.

GCN implementation for paper: [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/pdf/1609.02907.pdf)

Expand All @@ -7,7 +9,12 @@ GCN implementation for paper: [Semi-Supervised Classification with Graph Convolu
| dataset | Citeseer | Cora | Pubmed | NELL |
|---------------|----------|------|--------|------|
| GCN(official) | 70.3 | 81.5 | 79.0 | 66.0 |
| This repo. | | 81.8 | 78.9 | |
| This repo. | | 81.7 | 78.7 | |

Hints:
To achieve 87.7% accuracy on the `Cora` dataset, PyTorch needs 400 epochs,
and it trains relatively slowly on the `Pubmed` dataset.


# HOWTO
```
Expand Down
14 changes: 7 additions & 7 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,13 @@
args = argparse.ArgumentParser()
args.add_argument('--dataset', default='cora')
args.add_argument('--model', default='gcn')
args.add_argument('--learning_rate', default=0.01)
args.add_argument('--epochs', default=200)
args.add_argument('--hidden1', default=16)
args.add_argument('--dropout', default=0.5)
args.add_argument('--weight_decay', default=5e-4)
args.add_argument('--early_stopping', default=10)
args.add_argument('--max_degree', default=3)
args.add_argument('--learning_rate', type=float, default=0.01)
args.add_argument('--epochs', type=int, default=200)
args.add_argument('--hidden', type=int, default=16)
args.add_argument('--dropout', type=float, default=0.5)
args.add_argument('--weight_decay', type=float, default=5e-4)
args.add_argument('--early_stopping', type=int, default=10)
args.add_argument('--max_degree', type=int, default=3)


args = args.parse_args()
Expand Down
9 changes: 5 additions & 4 deletions model.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from torch.nn import functional as F
from layer import GraphConvolution

from config import args

class GCN(nn.Module):

Expand All @@ -18,14 +19,14 @@ def __init__(self, input_dim, output_dim, num_features_nonzero):
print('num_features_nonzero:', num_features_nonzero)


self.layers = nn.Sequential(GraphConvolution(self.input_dim, 16, num_features_nonzero,
self.layers = nn.Sequential(GraphConvolution(self.input_dim, args.hidden, num_features_nonzero,
activation=F.relu,
dropout=0.5,
dropout=args.dropout,
is_sparse_inputs=True),

GraphConvolution(16, 7, num_features_nonzero,
GraphConvolution(args.hidden, output_dim, num_features_nonzero,
activation=F.relu,
dropout=0.5,
dropout=args.dropout,
is_sparse_inputs=False),

)
Expand Down
Binary file modified res/screen.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
17 changes: 7 additions & 10 deletions train.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,17 +22,13 @@
print('y:', y_train.shape, y_val.shape, y_test.shape)
print('mask:', train_mask.shape, val_mask.shape, test_mask.shape)



# D^-1@X
features = preprocess_features(features) # [49216, 2], [49216], [2708, 1433]
print('features coordinates::', features[0].shape)
print('features data::', features[1].shape)
print('features shape::', features[2])
supports = preprocess_adj(adj)

device = torch.device('cuda')
train_label = torch.from_numpy(y_train).long().to(device)
num_classes = train_label.shape[1]
train_label = train_label.argmax(dim=1)
train_mask = torch.from_numpy(train_mask.astype(np.int)).to(device)
val_label = torch.from_numpy(y_val).long().to(device)
Expand All @@ -50,14 +46,15 @@
v = torch.from_numpy(supports[1]).to(device)
support = torch.sparse.FloatTensor(i.t(), v, supports[2]).float().to(device)

print(feature)
print(support)
print('x :', feature)
print('sp:', support)
num_features_nonzero = feature._nnz()
dropout = args.dropout
feat_dim = feature.shape[1]


net = GCN(1433, 7, num_features_nonzero)
net = GCN(feat_dim, num_classes, num_features_nonzero)
net.to(device)
optimizer = optim.Adam(net.parameters(), lr=1e-2)
optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)

net.train()
for epoch in range(args.epochs):
Expand Down

0 comments on commit 753e563

Please sign in to comment.