Commit e4d898e (1 parent: ec2df05)
Showing 31 changed files with 1,246 additions and 0 deletions.

@@ -0,0 +1,60 @@
import torch
from utils_file.dataset_utils import mixed_dataset
from utils_file.utils import *
import networkx as nx
from torch_geometric.data import Data, Batch
from torch_geometric.loader import DataLoader
from torch.utils.tensorboard import SummaryWriter

class config_gap:
    def __init__(self, data='ss1', batch_size=1, mode='train', is_plot=False):
        self.model = "spectral for graph embedding"
        self.loader, self.dataset = mixed_dataset(data, batch_size=batch_size)
        self.data = data
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.mode = mode
        self.is_plot = is_plot
        self.plot_path = None
        self.baseline = 0
        self.balance = 0

        if self.model == "spectral for graph embedding" and self.mode == 'train':
            # spectral embedding optimizer == se_opt
            self.se_opt = {'lr': 0.001, 'weight_decay': 5e-5}
            # partitioning embedding optimizer == pe_opt
            self.pe_opt = {'lr': 0.01, 'weight_decay': 5e-6}
            self.is_se = True
            self.is_pe = True
            self.se_params = {'l': 32, 'pre': 4, 'post': 4, 'coarsening_threshold': 2,
                              'activation': 'tanh', 'lins': [16, 32, 32, 16, 16]}
            self.pe_params = {'l': 32, 'pre': 4, 'post': 4, 'coarsening_threshold': 2,
                              'activation': 'tanh', 'lins': [16, 16, 16, 16, 16]}
            self.se_epoch = 200
            self.pe_epoch = 200
            self.se_savepath = 'spectral_weights/spectral_weights_ss1.pt'
        elif self.model == "spectral for graph embedding" and self.mode == 'test':
            pass

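# --- Editor's sketch (not in the original commit) --------------------------
# The se_opt / pe_opt dicts above are optimizer keyword arguments. Assuming
# torch.optim.Adam (the training script is not part of this commit), they
# would plausibly be consumed like this:
#
#   config = config_gap()
#   spectral_model = ModelSpectral(config.se_params, config.device)
#   opt_se = torch.optim.Adam(spectral_model.parameters(), **config.se_opt)
#   opt_pe = torch.optim.Adam(partition_model.parameters(), **config.pe_opt)
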
# Scratch code for quick manual checks (kept commented out):
# device = 'cpu'
# A = input_matrix()
# print(A.toarray())
# row = A.row
# col = A.col
# rowcols = np.array([row, col])
# edges = torch.tensor(rowcols, dtype=torch.long)
# nodes = torch.randn(A.shape[0], 2)
# data = Data(x=nodes, edge_index=edges)
# print(data)
# dataset = [data]
# loader = DataLoader(dataset, batch_size=1, shuffle=True)
# print(loader)
# for d in loader:
#     print(laplacian(d))

# print(g.edge_index)
# print(laplacian(A))
# config = config_gap()
# print(config.dataset)
# for d in config.loader:
#     print(d)

@@ -0,0 +1,157 @@
import math
import numpy as np
import torch
import scipy.sparse as sp
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.functional as F
import torch.nn as nn
from utils_file import utils
import tqdm
from typing import Tuple, Union

from torch import Tensor
from torch_sparse import SparseTensor, matmul

from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, Size

class GraphConvolution(Module):
    """Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    forward:
        H: node feature matrix
        A: normalized adjacency matrix (sparse)
        W: weight matrix
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # uniform init scaled by the fan-out, as in the reference implementation
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, H, A):
        W = self.weight
        b = self.bias

        HW = torch.mm(H, W)

        # D^{-1/2} A D^{-1/2} X W
        AHW = torch.sparse.mm(A, HW)
        if self.bias is not None:
            return AHW + b
        else:
            return AHW

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
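
# --- Editor's sketch (not in the original commit) --------------------------
# One forward pass through GraphConvolution on a 3-node path graph. For
# brevity the adjacency is left unnormalized; the D^{-1/2} A D^{-1/2}
# scaling from the docstring would be applied in real use.
#
#   layer = GraphConvolution(in_features=2, out_features=4)
#   idx = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
#   A = torch.sparse_coo_tensor(idx, torch.ones(4), (3, 3))
#   out = layer(torch.randn(3, 2), A)   # shape: (3, 4)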

class SAGEConv(MessagePassing):
    r"""The GraphSAGE operator from the `"Inductive Representation Learning on
    Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper

    .. math::
        \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + \mathbf{W}_2 \cdot
        \mathrm{mean}_{j \in \mathcal{N(i)}} \mathbf{x}_j

    Args:
        in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
            derive the size from the first input(s) to the forward method.
            A tuple corresponds to the sizes of source and target
            dimensionalities.
        out_channels (int): Size of each output sample.
        normalize (bool, optional): If set to :obj:`True`, output features
            will be :math:`\ell_2`-normalized, *i.e.*,
            :math:`\frac{\mathbf{x}^{\prime}_i}
            {\| \mathbf{x}^{\prime}_i \|_2}`.
            (default: :obj:`False`)
        root_weight (bool, optional): If set to :obj:`False`, the layer will
            not add transformed root node features to the output.
            (default: :obj:`True`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.

    Shapes:
        - **inputs:**
          node features :math:`(|\mathcal{V}|, F_{in})` or
          :math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
          if bipartite,
          edge indices :math:`(2, |\mathcal{E}|)`
        - **outputs:** node features :math:`(|\mathcal{V}|, F_{out})` or
          :math:`(|\mathcal{V_t}|, F_{out})` if bipartite
    """
    def __init__(self, in_channels: Union[int, Tuple[int, int]],
                 out_channels: int, normalize: bool = False,
                 root_weight: bool = True, bias: bool = True, **kwargs):
        kwargs.setdefault('aggr', 'mean')
        super().__init__(**kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.normalize = normalize
        self.root_weight = root_weight

        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)

        self.lin_l = Linear(in_channels[0], out_channels, bias=bias)
        if self.root_weight:
            self.lin_r = Linear(in_channels[1], out_channels, bias=False)

        self.reset_parameters()

    def reset_parameters(self):
        self.lin_l.reset_parameters()
        if self.root_weight:
            self.lin_r.reset_parameters()

    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
                size: Size = None) -> Tensor:
        """"""
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)

        # propagate_type: (x: OptPairTensor)
        out = self.propagate(edge_index, x=x, size=size)
        out = self.lin_l(out)

        x_r = x[1]
        if self.root_weight and x_r is not None:
            out += self.lin_r(x_r)

        if self.normalize:
            out = F.normalize(out, p=2., dim=-1)

        return out

    def message(self, x_j: Tensor) -> Tensor:
        return x_j

    def message_and_aggregate(self, adj_t: SparseTensor,
                              x: OptPairTensor) -> Tensor:
        adj_t = adj_t.set_value(None, layout=None)
        return matmul(adj_t, x[0], reduce=self.aggr)
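
# --- Editor's sketch (not in the original commit) --------------------------
# The mean-aggregation SAGEConv above, applied to a 5-node chain:
#
#   conv = SAGEConv(16, 32)
#   x = torch.randn(5, 16)
#   edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
#   out = conv(x, edge_index)           # shape: (5, 32)
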
Binary files added (TensorBoard event logs; contents not shown):

  +40 Bytes  log/imagesensor/events.out.tfevents.1662007020.DESKTOP-D775MV0.20760.0
  +3.14 KB   log/imagesensor/log_imagesensor_cuts/events.out.tfevents.1662007480.DESKTOP-D775MV0.20760.1
  +3.14 KB   log/imagesensor/log_imagesensor_metis/events.out.tfevents.1662007480.DESKTOP-D775MV0.20760.2
  +2.73 KB   log/nv1/log_nv1__cuts/events.out.tfevents.1662370650.DESKTOP-D775MV0.13420.1
  +2.73 KB   log/nv1/log_nv1__metis/events.out.tfevents.1662370650.DESKTOP-D775MV0.13420.2
  +2.85 KB   log/power9/log_power9_cuts/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.1
  +2.85 KB   log/power9/log_power9_metis/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.2
  +3.03 KB   log/radiation/log_radiation_cuts/events.out.tfevents.1662004342.DESKTOP-D775MV0.7696.1
  +3.03 KB   log/radiation/log_radiation_metis/events.out.tfevents.1662004342.DESKTOP-D775MV0.7696.2
  +2.67 KB   log/ss1/log_ss1_cuts/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.1
  +2.67 KB   log/ss1/log_ss1_metis/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.2
  +1.87 KB   log/test1/log_test1_cuts/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.1
  +1.87 KB   log/test1/log_test1_metis/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.2

Several further binary event files were added but are not named in this view.

@@ -0,0 +1,17 @@
import torch
import torch.nn as nn
from utils_file.utils import *


def loss_normalized_cut(y_pred, graph):
    # Normalized-cut-style loss for a soft 2-way partition: sum over edges
    # (u, v) of y[u, 0] * y[v, 1], divided by the degree-weighted partition
    # volumes gamma = Y^T d.
    y = y_pred
    d = degree(graph.edge_index[0], num_nodes=y.size(0))
    gamma = y.t() @ d
    c = torch.sum(y[graph.edge_index[0], 0] * y[graph.edge_index[1], 1])
    return torch.sum(torch.div(c, gamma))


def loss_embedding(x, L):
    # Sum of per-column residuals of the embedding x against the graph
    # Laplacian L (residual comes from utils_file.utils).
    mse = nn.MSELoss()
    l = torch.tensor(0.)
    for i in range(x.shape[1]):
        l += residual(x[:, i], L, mse)
    return l
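
# --- Editor's sketch (not in the original commit) --------------------------
# Evaluating loss_normalized_cut on a toy 3-node path graph. `degree` is
# assumed to resolve to torch_geometric.utils.degree via the wildcard
# import above.
#
#   from torch_geometric.data import Data
#   edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
#   toy = Data(x=torch.randn(3, 1), edge_index=edge_index)
#   probs = torch.softmax(torch.randn(3, 2), dim=1)   # soft 2-way split
#   print(loss_normalized_cut(probs, toy))            # scalar tensor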

@@ -0,0 +1,129 @@
import torch
import torch.nn as nn
from torch_geometric.nn import avg_pool, graclus
from torch_geometric.data import Batch
from layers import SAGEConv

# Neural network for the embedding module
class ModelSpectral(torch.nn.Module):
    def __init__(self, se_params, device):
        super(ModelSpectral, self).__init__()
        self.l = se_params.get('l')
        self.pre = se_params.get('pre')
        self.post = se_params.get('post')
        self.coarsening_threshold = se_params.get('coarsening_threshold')
        self.activation = getattr(torch, se_params.get('activation'))
        self.lins = se_params.get('lins')

        self.conv_post = nn.ModuleList(
            [SAGEConv(self.l, self.l) for i in range(self.post)]
        )
        self.conv_coarse = SAGEConv(2, self.l)
        self.lins1 = nn.Linear(self.l, self.lins[0])
        self.lins2 = nn.Linear(self.lins[0], self.lins[1])
        self.lins3 = nn.Linear(self.lins[1], self.lins[2])
        self.final = nn.Linear(self.lins[2], 2)
        self.device = device

    def forward(self, graph):
        x, edge_index, batch = graph.x, graph.edge_index, graph.batch
        cluster_info = []
        edge_info = []
        while x.size()[0] > self.coarsening_threshold:
            # pooling / coarsening / restriction
            cluster = graclus(edge_index, num_nodes=x.shape[0])
            cluster_info.append(cluster)
            edge_info.append(edge_index)
            gc = avg_pool(cluster, Batch(batch=batch, x=x, edge_index=edge_index))
            x, edge_index, batch = gc.x, gc.edge_index, gc.batch
        # coarse iterations: the graph has been coarsened to the threshold
        # (2 nodes), so the 2x2 identity serves as the coarse-level features
        x = torch.eye(2).to(self.device)
        x = self.conv_coarse(x, edge_index)
        x = self.activation(x)
        while edge_info:
            # un-pooling / interpolation / prolongation / refinement
            edge_index = edge_info.pop()
            _, inverse = torch.unique(cluster_info.pop(), return_inverse=True)
            x = x[inverse]
            # post-smoothing
            for i in range(self.post):
                x = self.activation(self.conv_post[i](x, edge_index))
        x = self.lins1(x)
        x = self.activation(x)
        x = self.lins2(x)
        x = self.activation(x)
        x = self.lins3(x)
        x = self.activation(x)
        x = self.final(x)
        # orthonormalize the two output columns
        x, _ = torch.linalg.qr(x, mode='reduced')
        return x
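
# --- Editor's note (not in the original commit) -----------------------------
# The trailing reduced QR factorization returns, for a full-rank output,
# orthonormal columns that mimic an eigenvector basis; i.e. for the output
# of ModelSpectral:
#
#   emb = model(graph)   # graph: any torch_geometric Batch with x, edge_index
#   torch.allclose(emb.t() @ emb, torch.eye(2), atol=1e-5)   # -> True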

# Neural network for the partitioning module
class ModelPartitioning(torch.nn.Module):
    def __init__(self, pe_params):
        super(ModelPartitioning, self).__init__()

        self.l = pe_params.get('l')
        self.pre = pe_params.get('pre')
        self.post = pe_params.get('post')
        self.coarsening_threshold = pe_params.get('coarsening_threshold')
        self.activation = getattr(torch, pe_params.get('activation'))
        self.lins = pe_params.get('lins')

        self.conv_first = SAGEConv(1, self.l)
        self.conv_pre = nn.ModuleList(
            [SAGEConv(self.l, self.l) for i in range(self.pre)]
        )
        self.conv_post = nn.ModuleList(
            [SAGEConv(self.l, self.l) for i in range(self.post)]
        )
        self.conv_coarse = SAGEConv(self.l, self.l)

        self.lins1 = nn.Linear(self.l, self.lins[0])
        self.lins2 = nn.Linear(self.lins[0], self.lins[1])
        self.lins3 = nn.Linear(self.lins[1], self.lins[2])
        # the head consumes the output of lins3, i.e. lins[2] features
        self.final = nn.Linear(self.lins[2], 2)

    def forward(self, graph):
        x, edge_index, batch = graph.x, graph.edge_index, graph.batch
        x = self.activation(self.conv_first(x, edge_index))
        x_info = []
        cluster_info = []
        edge_info = []
        batches = []
        while x.size()[0] > self.coarsening_threshold:
            # pre-smoothing
            for i in range(self.pre):
                x = self.activation(self.conv_pre[i](x, edge_index))
            # pooling / coarsening / restriction
            x_info.append(x)
            batches.append(batch)
            cluster = graclus(edge_index, num_nodes=x.shape[0])
            cluster_info.append(cluster)
            edge_info.append(edge_index)
            gc = avg_pool(cluster, Batch(batch=batch, x=x, edge_index=edge_index))
            x, edge_index, batch = gc.x, gc.edge_index, gc.batch
        # coarse iterations
        x = self.activation(self.conv_coarse(x, edge_index))
        while edge_info:
            # un-pooling / interpolation / prolongation / refinement
            edge_index = edge_info.pop()
            _, inverse = torch.unique(cluster_info.pop(), return_inverse=True)
            x = (x[inverse] + x_info.pop()) / 2
            # post-smoothing
            for i in range(self.post):
                x = self.activation(self.conv_post[i](x, edge_index))
        x = self.lins1(x)
        x = self.activation(x)
        x = self.lins2(x)
        x = self.activation(x)
        x = self.lins3(x)
        x = self.activation(x)
        x = self.final(x)
        # soft assignment over the two partitions
        x = torch.softmax(x, dim=1)
        return x
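
# --- Editor's sketch (not in the original commit) --------------------------
# How the pieces plausibly fit together for the spectral stage. Adam and the
# `laplacian` helper are assumptions (the helper appears only in the
# commented-out scratch code of the config file); the wiring of the
# partitioning stage is not shown in this commit.
#
#   config = config_gap(data='ss1', mode='train')
#   spec = ModelSpectral(config.se_params, config.device).to(config.device)
#   opt = torch.optim.Adam(spec.parameters(), **config.se_opt)
#   for epoch in range(config.se_epoch):
#       for d in config.loader:
#           d = d.to(config.device)
#           emb = spec(d)                            # near-orthonormal embedding
#           loss = loss_embedding(emb, laplacian(d)) # laplacian: assumed helper
#           opt.zero_grad()
#           loss.backward()
#           opt.step()
#   torch.save(spec.state_dict(), config.se_savepath)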