From e4d898e882aa1d2d4eaac3a13010eb48faea2acf Mon Sep 17 00:00:00 2001 From: yushengzhai <841359931@qq.com> Date: Tue, 6 Sep 2022 09:55:38 +0800 Subject: [PATCH] 2022-9-6-9.55 --- config.py | 60 +++ layers.py | 157 +++++++ ...fevents.1662007020.DESKTOP-D775MV0.20760.0 | Bin 0 -> 40 bytes ...fevents.1662007480.DESKTOP-D775MV0.20760.1 | Bin 0 -> 3218 bytes ...fevents.1662007480.DESKTOP-D775MV0.20760.2 | Bin 0 -> 3218 bytes ...fevents.1662370082.DESKTOP-D775MV0.13420.0 | Bin 0 -> 40 bytes ...fevents.1662370650.DESKTOP-D775MV0.13420.1 | Bin 0 -> 2798 bytes ...fevents.1662370650.DESKTOP-D775MV0.13420.2 | Bin 0 -> 2798 bytes ...fevents.1661997887.DESKTOP-D775MV0.15268.0 | Bin 0 -> 40 bytes ...fevents.1661998492.DESKTOP-D775MV0.15268.1 | Bin 0 -> 2918 bytes ...fevents.1661998492.DESKTOP-D775MV0.15268.2 | Bin 0 -> 2918 bytes ...tfevents.1662002627.DESKTOP-D775MV0.7696.0 | Bin 0 -> 40 bytes ...tfevents.1662004342.DESKTOP-D775MV0.7696.1 | Bin 0 -> 3098 bytes ...tfevents.1662004342.DESKTOP-D775MV0.7696.2 | Bin 0 -> 3098 bytes ...tfevents.1661996726.DESKTOP-D775MV0.4204.0 | Bin 0 -> 40 bytes ...fevents.1662366113.DESKTOP-D775MV0.15528.0 | Bin 0 -> 40 bytes ...fevents.1662366420.DESKTOP-D775MV0.15524.0 | Bin 0 -> 40 bytes ...tfevents.1661996730.DESKTOP-D775MV0.4204.1 | Bin 0 -> 2738 bytes ...tfevents.1661996730.DESKTOP-D775MV0.4204.2 | Bin 0 -> 2738 bytes ...fevents.1662009066.DESKTOP-D775MV0.15544.0 | Bin 0 -> 40 bytes ...tfevents.1662014877.DESKTOP-D775MV0.1092.0 | Bin 0 -> 40 bytes ...fevents.1662012056.DESKTOP-D775MV0.15544.1 | Bin 0 -> 1918 bytes ...fevents.1662012056.DESKTOP-D775MV0.15544.2 | Bin 0 -> 1918 bytes losses.py | 17 + models.py | 129 ++++++ test.py | 117 +++++ test_pickle.py | 63 +++ testing.py | 16 + training.py | 186 ++++++++ utils_file/dataset_utils.py | 87 ++++ utils_file/utils.py | 414 ++++++++++++++++++ 31 files changed, 1246 insertions(+) create mode 100644 config.py create mode 100644 layers.py create mode 100644 
log/imagesensor/events.out.tfevents.1662007020.DESKTOP-D775MV0.20760.0 create mode 100644 log/imagesensor/log_imagesensor_cuts/events.out.tfevents.1662007480.DESKTOP-D775MV0.20760.1 create mode 100644 log/imagesensor/log_imagesensor_metis/events.out.tfevents.1662007480.DESKTOP-D775MV0.20760.2 create mode 100644 log/nv1/events.out.tfevents.1662370082.DESKTOP-D775MV0.13420.0 create mode 100644 log/nv1/log_nv1__cuts/events.out.tfevents.1662370650.DESKTOP-D775MV0.13420.1 create mode 100644 log/nv1/log_nv1__metis/events.out.tfevents.1662370650.DESKTOP-D775MV0.13420.2 create mode 100644 log/power9/events.out.tfevents.1661997887.DESKTOP-D775MV0.15268.0 create mode 100644 log/power9/log_power9_cuts/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.1 create mode 100644 log/power9/log_power9_metis/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.2 create mode 100644 log/radiation/events.out.tfevents.1662002627.DESKTOP-D775MV0.7696.0 create mode 100644 log/radiation/log_radiation_cuts/events.out.tfevents.1662004342.DESKTOP-D775MV0.7696.1 create mode 100644 log/radiation/log_radiation_metis/events.out.tfevents.1662004342.DESKTOP-D775MV0.7696.2 create mode 100644 log/ss1/events.out.tfevents.1661996726.DESKTOP-D775MV0.4204.0 create mode 100644 log/ss1/events.out.tfevents.1662366113.DESKTOP-D775MV0.15528.0 create mode 100644 log/ss1/events.out.tfevents.1662366420.DESKTOP-D775MV0.15524.0 create mode 100644 log/ss1/log_ss1_cuts/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.1 create mode 100644 log/ss1/log_ss1_metis/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.2 create mode 100644 log/test1/events.out.tfevents.1662009066.DESKTOP-D775MV0.15544.0 create mode 100644 log/test1/events.out.tfevents.1662014877.DESKTOP-D775MV0.1092.0 create mode 100644 log/test1/log_test1_cuts/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.1 create mode 100644 log/test1/log_test1_metis/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.2 create mode 100644 losses.py create mode 
100644 models.py create mode 100644 test.py create mode 100644 test_pickle.py create mode 100644 testing.py create mode 100644 training.py create mode 100644 utils_file/dataset_utils.py create mode 100644 utils_file/utils.py diff --git a/config.py b/config.py new file mode 100644 index 0000000..07a3408 --- /dev/null +++ b/config.py @@ -0,0 +1,60 @@ +import torch +from utils_file.dataset_utils import mixed_dataset +from utils_file.utils import * +import networkx as nx +from torch_geometric.data import Data, Batch +from torch_geometric.loader import DataLoader +from torch.utils.tensorboard import SummaryWriter + +class config_gap: + def __init__(self,data='ss1',batch_size=1,mode = 'train',is_plot=False): + self.model = "spectral for graph embedding" + self.loader,self.dataset = mixed_dataset(data,batch_size=batch_size) + self.data = data + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.mode = mode + self.is_plot = is_plot + self.plot_path = None + self.baseline = 0 + self.balance = 0 + + if self.model=="spectral for graph embedding" and self.mode=='train': + # spectral embedding optimizer == se_opt + self.se_opt = {'lr':0.001,'weight_decay':5e-5} + # partitioning embedding optimizer == pm_opt + self.pe_opt = {'lr':0.01,'weight_decay':5e-6} + self.is_se = True + self.is_pe = True + self.se_params = {'l':32,'pre':4,'post':4,'coarsening_threshold':2,'activation':'tanh','lins':[16,32,32,16,16]} + self.pe_params = {'l':32,'pre':4,'post':4,'coarsening_threshold':2,'activation':'tanh','lins':[16,16,16,16,16]} + self.se_epoch = 200 + self.pe_epoch = 200 + self.se_savepath = 'spectral_weights/spectral_weights_ss1.pt' + elif self.model=="spectral graph embedding" and self.mode=='test': + pass + + +# device = 'cpu' +# A = input_matrix() +# print(A.toarray()) +# row = A.row +# col = A.col +# rowcols = np.array([row,col]) +# edges = torch.tensor(rowcols,dtype=torch.long) +# nodes = torch.randn(A.shape[0],2) +# data = 
Data(x=nodes,edge_index=edges) +# print(data) +# dataset = [] +# dataset.append(data) +# loader = DataLoader(dataset,batch_size=1,shuffle=True) +# print(loader) +# for d in loader: +# print(laplacian(d)) + + +# print(g.edge_index) +# print(laplacian(A)) +# config = config_gap() +# print(config.dataset) +# for d in config.loader: +# print(d) \ No newline at end of file diff --git a/layers.py b/layers.py new file mode 100644 index 0000000..caf0d1f --- /dev/null +++ b/layers.py @@ -0,0 +1,157 @@ +import math +import numpy as np +import torch +import scipy.sparse as sp +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module +import torch.nn.functional as F +import torch.nn as nn +from utils_file import utils +import tqdm +from typing import Tuple, Union + +import torch.nn.functional as F +from torch import Tensor +from torch_sparse import SparseTensor, matmul + +from torch_geometric.nn.conv import MessagePassing +from torch_geometric.nn.dense.linear import Linear +from torch_geometric.typing import Adj, OptPairTensor, Size + +class GraphConvolution(Module): + """GCN layers + + Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 + + forward: + H: x feature matrix + A: norm adjacent matrix + W: weight matrix + """ + + def __init__(self, in_features, out_features, bias=True): + super(GraphConvolution, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features, out_features)) + if bias: + self.bias = Parameter(torch.FloatTensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + stdv = 1. 
/ math.sqrt(self.weight.size(1)) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.uniform_(-stdv, stdv) + + def forward(self,H,A): + W = self.weight + b = self.bias + + HW = torch.mm(H,W) + + # D^{-1/2}AD^{-1/2}XW + AHW = torch.sparse.mm(A,HW) + if self.bias is not None: + return AHW + b + else: + return AHW + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_features) + ' -> ' \ + + str(self.out_features) + ')' + +class SAGEConv(MessagePassing): + r"""The GraphSAGE operator from the `"Inductive Representation Learning on + Large Graphs" `_ paper + + .. math:: + \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + \mathbf{W}_2 \cdot + \mathrm{mean}_{j \in \mathcal{N(i)}} \mathbf{x}_j + + Args: + in_channels (int or tuple): Size of each input sample, or :obj:`-1` to + derive the size from the first input(s) to the forward method. + A tuple corresponds to the sizes of source and target + dimensionalities. + out_channels (int): Size of each output sample. + normalize (bool, optional): If set to :obj:`True`, output features + will be :math:`\ell_2`-normalized, *i.e.*, + :math:`\frac{\mathbf{x}^{\prime}_i} + {\| \mathbf{x}^{\prime}_i \|_2}`. + (default: :obj:`False`) + root_weight (bool, optional): If set to :obj:`False`, the layer will + not add transformed root node features to the output. + (default: :obj:`True`) + bias (bool, optional): If set to :obj:`False`, the layer will not learn + an additive bias. (default: :obj:`True`) + **kwargs (optional): Additional arguments of + :class:`torch_geometric.nn.conv.MessagePassing`. 
+ + Shapes: + - **inputs:** + node features :math:`(|\mathcal{V}|, F_{in})` or + :math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))` + if bipartite, + edge indices :math:`(2, |\mathcal{E}|)` + - **outputs:** node features :math:`(|\mathcal{V}|, F_{out})` or + :math:`(|\mathcal{V_t}|, F_{out})` if bipartite + """ + def __init__(self, in_channels: Union[int, Tuple[int, int]], + out_channels: int, normalize: bool = False, + root_weight: bool = True, bias: bool = True, **kwargs): + kwargs.setdefault('aggr', 'mean') + super().__init__(**kwargs) + + self.in_channels = in_channels + self.out_channels = out_channels + self.normalize = normalize + self.root_weight = root_weight + + if isinstance(in_channels, int): + in_channels = (in_channels, in_channels) + + self.lin_l = Linear(in_channels[0], out_channels, bias=bias) + if self.root_weight: + self.lin_r = Linear(in_channels[1], out_channels, bias=False) + + self.reset_parameters() + + def reset_parameters(self): + self.lin_l.reset_parameters() + if self.root_weight: + self.lin_r.reset_parameters() + + def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, + size: Size = None) -> Tensor: + """""" + if isinstance(x, Tensor): + x: OptPairTensor = (x, x) + + # propagate_type: (x: OptPairTensor) + out = self.propagate(edge_index, x=x, size=size) + out = self.lin_l(out) + + x_r = x[1] + if self.root_weight and x_r is not None: + out += self.lin_r(x_r) + + if self.normalize: + out = F.normalize(out, p=2., dim=-1) + + return out + + def message(self, x_j: Tensor) -> Tensor: + return x_j + + def message_and_aggregate(self, adj_t: SparseTensor, + x: OptPairTensor) -> Tensor: + adj_t = adj_t.set_value(None, layout=None) + return matmul(adj_t, x[0], reduce=self.aggr) + + + + \ No newline at end of file diff --git a/log/imagesensor/events.out.tfevents.1662007020.DESKTOP-D775MV0.20760.0 b/log/imagesensor/events.out.tfevents.1662007020.DESKTOP-D775MV0.20760.0 new file mode 100644 index 
0000000000000000000000000000000000000000..21b59dc449d0b99a58a392e2d6982ed0f6db6878 GIT binary patch literal 40 rcmb1OfPlsI-b$RZNA-5|9=YKt#hX-=n3<>NT9%quVrA5pQTGr4$P)~L literal 0 HcmV?d00001 diff --git a/log/imagesensor/log_imagesensor_cuts/events.out.tfevents.1662007480.DESKTOP-D775MV0.20760.1 b/log/imagesensor/log_imagesensor_cuts/events.out.tfevents.1662007480.DESKTOP-D775MV0.20760.1 new file mode 100644 index 0000000000000000000000000000000000000000..2bae4a3e6e9f6f27154b5b28ba958a219df43d38 GIT binary patch literal 3218 zcmZ|QYfzMB6bEqK4G9c^0``5lC>Oac!4m4qWmFjBh#~~Z2-M=ZjXN6+w_pP`XSf8Y z$i+!T3vfx2S#2Ux3&(I_CI=xVXR}2qFwEhDl`~pk?oc+p9{qAYocHtjJ?EMCoc}!1 zqNiU^b$YN%Vyh-**_Fu@U$?i)^@Sxdsg;HjQ)1kV{`IMlXLbGNs*Ntq#ZT+hlB3Zkb1oTOv$0Q(_E^lL0gK-Tf|hHJu0o{X zQJbyTji8)k8Cpb29t{nJcM;U|-9kK~=NRO8n3~=&pd@Ig^?VW{Ck9Oi!I0>DhoHdA zy7h>bFlbP~35Uf@(4Nhz6hzJp+A6`)$u$K8ZJHX{i0F9+O{*ZTA~~F(pXv?*A{Pc- zQ9-`RXeH>)(X`EoTp4uR2m0*Cn+S?N^j|unr3~^=!S&%pPlEosbT$*w3k=e!U_coY zNl@*gylg~n43azL?n%}XWZbUEL9~oPu`1|lOv@qYLe1cIMD7fVP(e>h)nbB<-gqw` zkq3iTO5l^V6$sik5u`)p$smUcHk`6vB&c}Swgb_N3^J)8WACZm1WhVy^oU+!&}s=Z zb5=b;A9N@0LgdAuKneWr9-bx0{Aqv@kvD@RU+BM+f0Cf-Bzqwu9|koGI5T1|B*@&^ zR)k2!Ad>*Yq2xq@BJZS?B9a(nm%zPGP4lF5R+J+W3`!F)(|E#I`{FyendeG zDw8)tIIkyY%aR*Sh=Lh(L4aM_@ra;_^yU^sAq?skFyGQxNl^Cp@rMzu>Uz09C{64d7X_8LL@tNSg8q8a3nplu*QK~Tt}gwGJgFeq8T8}t91A*k}fFP|gQ zFi4I@k$c&n1Q{*O{fJ^2BuArfUtJhMeP!#;BYK5F(*l|%#Fqq({d)f^L~#u26i{E4 zFh)?Tr}-kHRSY^H=jjt8-3X6|ncpB<&7j@BFlL<`BAp%xg%!~n2FXuNW>*xwp1S6X zVMJO6Sp=+`X!@CSa(EiPMYNVdatNzqbtMG-+u}2VD4s#{0*WoU(F7@z#zzq)FlbbO n(YYy?pvMgjyN-r}tf3z~)^y00o_0&JlAJ6aa=kxpS`904cKS##DW4sXRW}#<@ zh1TuEJ69|@k{lPGC=5(fB&KfhE-+nA;{C6y)@uSR3REJf6F6GhTP{sdD@F0DIHe** zk(i=Rwqs}+yF4e>xAOm!&rOnV7GY&$;=fnhn9-UwVyp-jg9a6n6Fq?0w;BBYFD$^lu^s7pVNAEGv+DH709 z8kq(|)F|oZt5Z>cY-lv>?_Y~h#E@$YAP$YfqAR=+QjPT=1hkAs*7NmV2o19`;sDvw zh~4r*i%_;<$1y<5X{7x$Z~!6wU1udAI~o-aW!50{UivZ~kUfp=?@ek$NPp{U0w4z( zg}1VAAms5eSPjULMqF{U1|gO?GZ_$od`Jv)u#elK_f#=FAJgglNt@6l{Av? 
z-MSqi{cAxwASW6bTKWbN`qKaI93W>JS*e(T2)X-Qx&X+9Mx3b10fgk1ep!H4(dbO~ zMHhq=yMN~ZT1_MQXssJUk4tCs0J+lWRyaq7(C&q=@&UQgNKqbf9HF7mfw68*8X7rQ)j6OfrtaqrKztf?_-DUGD9!0{5umj+%5N3@L?~xARtw0TMs7jt z3<#ZFZl?pZjz+h?A2dTKMc7#e$b&}a4QzLW*r`bdKmrfIMk5H|ZaY zlBD1J?g3g)ql?B3Vua$J=RE-AMI%x5$3%p9Q-0-ugftr2Gw>dvG}A~WAQ6pnW0OY^ zQa&iC1|+7Dru}pzLRCpyYXEJa(H!6A2}0c;zSaTqrcqz>ZQRopj{Yg2jWjA*Wtu=q zHa}#IfHu*nQgz}kLMAS=8PH}L?U*V)ijW}VQ7a$`joga`3lN&-DB1w|&?t~Mjz5XN z3hg@q`O?U|`)e~wVo7_t0Qu3V6b5i`W!VK>Zp)oTK;jm6K?!CMvhyDy8YyKYTSp)*4Ox%~=+1aSoq>WG zP?%g;0|6x^6AiLxlktLKA&`PBwx|s)h!D=8Vo1Z$FXzzv`8Us;4`-hDI2t{BwV%K0 zDcu&(bUph*f1LA@^qjQqsvyYQs>+QD%e|-=a((VD|5Bll{%-rs`CFS=nn`wKy&J5` z3k|lLC!%)oA-CstKxY#s-Lqc@#T zF&L&%n{n-^hW#l;b3}jrS=0<_bFL+7XbEaACi<%P##z)BT$fhEV_ke7(Y0}LMmy*C zWC_>(YDmm)FeN&%K2(HS#&xM0DtvBjBHFdnNsQW(Ykf8R@x`5CqC=HNQq)#lOKRYq zfeO0cmiG+isI9ry)_}2WT>{CTP!7pZ+i=}l2mf3tZXjBs?X^a2%XQ;X7{4?qCi;|{ zR*qWE^|y6U=JGMkmSx>`KyAmhz6si-H^NBv%^f|?sO`CKZHA0%CPhTIb$2LGJ8<29 z8n!g|{Y|v*o+fwHj$F5%g(_)}G0``jjxIy(v_QAR%J_`8iJmC=(i^oi*N-lO{hwXu zh%Q`yzz?+x*OAvCS&{RU=;`U*0jOQM?$^NyeeepR9~Nf?qgHUOF+joL^V^9YRlOC4 z+KubjahR301`us@S%^Trbb+3RI6qAr(b1ytNYw6JYi1#ROu3rqVi*5t)E->_Yb*%r zb(@J!i1mm;?a8&5sqo6N-VH?S%p`HBmvLQbA%r$Wx)Oc;#@K4qUR+zrh1>2mRYZ4a z1_jhFaDC4~&<&SQ5pCssAs)3i*Qu^T!>qrI==)yHiKu-RXoc`sRP!j&nKD1zS__$e z#|Mc1@colY)B#+lS_;E=i}w+&OK91QI*@C*xzL$qlS6ds-XrO#gSg&eCN#zL4iX)5 zw`wcuV6I~&!qOCNAkpHn@@=R?xcd;LT{>Ikk2XTYT< zbq&!Ld51qjy@KnchamDv-bnPR?}tA{y^?GF6xhG#LI2ARovsjdB-f3TFt=)&wtd0< wuHC3#;@W)z2A_n`+vk|axgyk2Tvv`k*R5LGgPyx;51@|b`q2m&ufMwfKiPmD?EnA( literal 0 HcmV?d00001 diff --git a/log/nv1/log_nv1__metis/events.out.tfevents.1662370650.DESKTOP-D775MV0.13420.2 b/log/nv1/log_nv1__metis/events.out.tfevents.1662370650.DESKTOP-D775MV0.13420.2 new file mode 100644 index 0000000000000000000000000000000000000000..5dbd83091deb251478c9989e9dab3693eb612e33 GIT binary patch literal 2798 zcmZwIeJs=g7zgmvMZY_FCtRG@h`VryUd~1;sE(JQT%>2%8~ zD?=(}OX*h1>BJJwyhbRlUDL9)T2KA+Y|r!eXWxDQc=kLttbe1(iFD?)msNg{DL)4D 
zjFR^y?Us6lWTr?nBx3ncwwv95_xgE>J?EXpKj&%2HQ_R8={vopnIdm1mZ@c^J(tPm z|F>Nlb1^4B=xf*COjA8VuknAj9clFy>t4{a2z_2P;*2z(9p4X{P3X3|pnRlH2euA@ zo=xb>@}cWUhZj4Jg4QRrNzBXu(*4YyanJ^Y*2aG7L3)v4(O1xO2)(6B6^pcWkz5O! zA#~?f?uP#0G6l=! z7L9b_e9r*TPK3^hyn`=t#B!%V(9VSRDW4cd*%~er4BCazQw3{9NPp;@SP9yd(1VUQ zt|8qd?+FFHfY2GSHV2V5oLa1 zkk%ei#DMl7G_N5;jkT;U4zwqswW%XkNH^%O*$7%d=-$zeeMleNe?I}VkkFDH;WHD%}6(CG`XOc5_&jHHiC54 pWXo~T%LrXkALfAc9jB)!K}!f7-xq>^v2&iH0CWJM>txw|e*mdn>G}Ww literal 0 HcmV?d00001 diff --git a/log/power9/events.out.tfevents.1661997887.DESKTOP-D775MV0.15268.0 b/log/power9/events.out.tfevents.1661997887.DESKTOP-D775MV0.15268.0 new file mode 100644 index 0000000000000000000000000000000000000000..52c735ef2de161d807d1a35585ade92b18292686 GIT binary patch literal 40 rcmb1OfPlsI-b$Q`&kmkvIda2MiZ`h!F*8rkwJbHS#LDQ8c8ERz+h7c$ literal 0 HcmV?d00001 diff --git a/log/power9/log_power9_cuts/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.1 b/log/power9/log_power9_cuts/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.1 new file mode 100644 index 0000000000000000000000000000000000000000..f05f6bb658590e44b1b8b6765006fda595acc576 GIT binary patch literal 2918 zcmZwJdrZ?;6bEpS@ha~ES}JN0#-l=kDu~sQ=yH?}bWV*fV-MasRBT{4)D3V<qB{^Xn7d(SyJ61)G; zw~=H_t(SUa={6S@JA^KNrdLeoLapL#4g&acfXw$C<`s)MYB{>&ALGNVKmpEKg z5dUIEQi|T0dr1?h9p07@Z?11IL+`@9ogTc@stn@egUgPicjext0}7?_A;d3wa=Qw> zi2F-AP&#YtiC3GWYtXxKZ#BS-(EmB{)o)gOiQb+2q)vEov+O?c;R#c9=smb^?SjzC zV>86dW4C>U-jn;GPFUUgR8IUCvlr{pd)f541skly2kmffK<~|cPZwnRSz?Hn`qVX| z_u;;%6E3SxBoiN%F8&U^*oHU2I^UmjiSHfHZ9?zM{V5|@-+8x-_}CjASJ3-$Um?Ks z=*V5-Ps)Ot(fe~hAi(UlNDc8fFBG+)U&#Gd0T!C7)x;-t5B!LJ5%(Gaez{U{i}= z2cFm7LBEW9ZzB{o-l6lMe0F*ey^MRK9_|*06p;Li@VCt9gSao%Lu7IA*Tnm7zBY`0 zxeYJCSVP-&;#+h4M$iXypQHzEYTp^+MH-C-eF*oZ23VWubb@$m!M!o`uX3McfI+*Z zwZvyQ#yvnE%DqtsC$su#ziW>)JVYPHeUboEnPMZ!-%=Iv7=1YRm-H}xc+V5!GdF!Y zjXr{VZ#^7MHHQ;FI&}LfdO7zJ15|1Qz9+s#9sL~r3hrY%p-OmukNB$_YG%<#azFQ; zsc~g*5MSIfYVS0^KSXigqX)D3;1=SqXR931E4Xjf!D+KvN_<3joeTPC?niWR?$>@d 
z;$<}t+|b8x?=sh4Qx;mE`*PG?=wog84)|wPCGD???m97gCHDh5XlT~Z{ipw|#UK4j z?n?zo6g{TyS9%mLm$t5xE@lZ zn^eRPOR|E|ujYQPo(ED6RTH1qFcFGAf%{Ve{62YRBk_sP8szBLaKA?nO2K`Mc$51P f1$q_tbN{>Rv*{bek8ey-qF>AXfF4@Uww?bEvSd+m literal 0 HcmV?d00001 diff --git a/log/power9/log_power9_metis/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.2 b/log/power9/log_power9_metis/events.out.tfevents.1661998492.DESKTOP-D775MV0.15268.2 new file mode 100644 index 0000000000000000000000000000000000000000..2c2751df71587bb376c8a3a239187f918c5fb9af GIT binary patch literal 2918 zcmZwJeJm7U9Ki8Y&dZf@hdMg%@0XXHm%QAPUQtxYrmUQDRve}`g|w`^t!tIGa>Fn! zhVwEnWv$80PFmhpT#4#JlUqdED%SedKfmqw{QcSYkLTI*dmb+1->S;lXQTg6WS+=W zj0Tw*#HWi+rMT@rpOA9aUoau+eq-_9sm3@0{heQV;P^%?Hj9;F;Y1_a+ z{*|Ej9C&TgOP)pHzT}bJMet10|4Moti27V{bryIX(#uOq!jWJ8&AkF%m-N$hZ>Nw~ z$vShv>ydt>K)wliAKTnK@cN{enyH+S-?%hb2%bgy!`8pwAs?c0mx5nI`UyP`Bjk$? zNur`wTt@?9tfZ$SE81KL-S=X*rl1HX>+5lS5i@;6tiW#HG7{$_4p4f57e zy5-;vNuM_}hrJoQxDq^@^m^xq@1nk|i1PqEhxGQ#kLQu^ojzRy-iY)Ko#8gfd$e}d zfj1_7zh#yR`5RW&jo?j4-%(XHhkScQMho~2q?fV^l93PY8E6B)k@Q22)%M6cK6iTz zo=f^}3*$)SxzUOz;7v)-DfGnir@|nt3%nWW@+bCYFM@X_{c=F;G2|yl2Y-O)lfGHbut2^g#Qhie&7@}!%UY4I-yvNA??U>( zZT$-5FSkr;>HPgaxRSo7Ii?c%rUZW`csJ6w8hYXR>D*JS2i~3Z@ecPdqP~@MW)1i) zq~}*HL?fRp4p|T0gY@gX#rXZ5?J4Gf7m%K3Zm@v*{rA--;5|vNThx6N`J7KeQ}AA- zA1^v3MBXRv4iCIH>3d7VeUa~uoU{b*L;BRDFZza9!T%QK{xbj&C@V=z~ uxV>44{I;LveDHpx4>i5kguG&HrW<&F(rei2P~@lg>=%F!Abr;=!@)mgV_gXV literal 0 HcmV?d00001 diff --git a/log/radiation/events.out.tfevents.1662002627.DESKTOP-D775MV0.7696.0 b/log/radiation/events.out.tfevents.1662002627.DESKTOP-D775MV0.7696.0 new file mode 100644 index 0000000000000000000000000000000000000000..3f5b8cc146829543092fd5c21dec57579bb672f2 GIT binary patch literal 40 rcmb1OfPlsI-b$ROAKoe8JaWTPiZ`h!F*8rkwJbHS#L8%wYr{WjxAWmR*>YR}ipa&+7$6Yvex->yEoVk6ZB0)E|=BwI5=fuZ`4NDtl9t z8(5f?lb2PLrzu!AA2(9zIph8ubew#9y`=e~kJtRh77PB5SXDzNAr0qsK}ZZ)($EV2 z%e#{ZDIT^8Lt?~|t7&i|1gZ(Se7ijwi7`vMV_@OPmmY+~M<&E0S;&%8F%W#Kw~~$4h>%8;WPc=%EIB1$bJY`ilj981R7jjy 
z5+dMSdnYX{;X+sx5@(jw2-t4h+CvKS(%gzgvVtYM1gIvqt|w&ZQCTt)7nW2Dm|A3% zLr8tCQw9=OmK+msY(!f}$QM7J$wlJEk{tpHB~i_U+f=bfSW+b5 zw!tWUV!LX8Z9wA95@P{_Pfo;=Ed7xO+mQIM#9Bb#TMqVwRDG)Y8i_AU^k=zK?r2U( z?S;F&NLI0=LxA1%2XTbR>#hwT@ncD}0LT1aYzdi2&i)yRKTGr%Ch@fKRYFo)CvG4K zU`e=uiit~;glI;z_mBj##6=0Z>9Y=mbQi)KlRxNJ3enFYJ}~qtghH%I+s3S;LYt0cS(M?ImQ#(j!}ugt0_FV4Xht GC*@x%)RG2n*CdbtVnmZ@yt%%kZVsv zoS3B53Z+^Yn!Z?_Hc_NIoyfTTuj`lD@s-^z*#UI|F2|0;tk9(RtyUx}6=_P1+TN^s zOqiE6UEuK_96z*n3cIqw$GdN1t9}$&GRfW!k;8Q@{y_Ru#I|#CDk2&8Y{Wp!DUuNo zqetZG>)NS622jMM_&IJ&E{l-^8Ay?8?m`(#xm5pg77!LiYUC*+5ShVVrT{XCB3f?R zG(_H&m@fx1m?E)~b&ZHLd1a&lVN)bmEt`V~ODWa@;ZP*Aqwz8#`ct>I02xA&t!;Kc z5s6?W?*?K)5t}2;dl1R?Ox_QKp@?qT+Gs=!^9m|}45f%_#*t7&8gtxhfml*RzcJn! zkxgB-=YUvIM8`BZA>wHreFca$MT%NeOA%=<>b?oYh9cIr$M799xRgEuVoMPRL8LoM zc~dEC1!6~$-uc_FAQGL=X$N9Yk-#inE+TVI*7X44QbbW8G9uz8&Hn|&fg-<-o>_y) zhj${DMc+_zqzGq~b_F7`&Q41p!zi*LGdKwmd5duv5GRT}E_(kPk;@NrJ%Bh<*+mJkGlN0C_ViE>0@E{H>bxKiZyovK(wyfv?;0U1sal{D3a zNORAoI3RqARJBN6BBC$jB?57y$oiPOJVXxMsYwCiPLW;K+u4Y;Mg^||;z5yd(k3Ay zu8E&BfQ+DsN@8Y#$d}o@c|bfVa&orwZA8N5`T`(c6e-m_nS_XLfpi}bZ;H%R8S(eJ zvh3MmAR{R8Ev|VUuFC;(+eO%isWUM1fs??_p{r8_)%n03I72i z%0S0%ApR8DDi6hTtog^sA3(-XgfT=^pp=oS9CM~`T8b$0;^nMDL?&msGC;;sq)T&F zj7ZnLYA%ofio6!qJ0RklzL^gskRrl{o;pPQIKBcP<0z85&4j1ry7M(-fdo-xW^%hf zN?ET>kN_D^ku_Dl*AbC+1cw8eK#^^EAHE~PVcMqynMjfPsLF4MEVV733q(wj1o5>` Fe*u=jl{x?b literal 0 HcmV?d00001 diff --git a/log/ss1/events.out.tfevents.1661996726.DESKTOP-D775MV0.4204.0 b/log/ss1/events.out.tfevents.1661996726.DESKTOP-D775MV0.4204.0 new file mode 100644 index 0000000000000000000000000000000000000000..ddc0cc0b8ca64c6f2f63ae3183540f28163fbc31 GIT binary patch literal 40 rcmb1OfPlsI-b$R)Cd^sOeB_3s6mL>dVrHJ6YguYuiIvfrgMo_y*@q2K literal 0 HcmV?d00001 diff --git a/log/ss1/events.out.tfevents.1662366113.DESKTOP-D775MV0.15528.0 b/log/ss1/events.out.tfevents.1662366113.DESKTOP-D775MV0.15528.0 new file mode 100644 index 0000000000000000000000000000000000000000..9e882c4f5ee35dccee6706c91a73d0d80745e615 GIT binary patch 
literal 40 rcmb1OfPlsI-b$R+jMW)AM{hVv@g@}|X6EU+mZj#ESQ$C}EmHvi##0Oe literal 0 HcmV?d00001 diff --git a/log/ss1/events.out.tfevents.1662366420.DESKTOP-D775MV0.15524.0 b/log/ss1/events.out.tfevents.1662366420.DESKTOP-D775MV0.15524.0 new file mode 100644 index 0000000000000000000000000000000000000000..4647ff29475d90170ab79f9d8d213c4e274ef1b2 GIT binary patch literal 40 rcmb1OfPlsI-b$P&cbad_IeNoUiZ`h!F*8rkwJbHS#LDOhzw1)~;cg8z literal 0 HcmV?d00001 diff --git a/log/ss1/log_ss1_cuts/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.1 b/log/ss1/log_ss1_cuts/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.1 new file mode 100644 index 0000000000000000000000000000000000000000..dcdb012497d21bc04d459ccc771141c69a24cce7 GIT binary patch literal 2738 zcmZY8YfRH;6bA5WL1-rzMNm<&MTX)9kO3Q_p_&7HK+trK85pAykxNLsDkC~F*UYgc zLt~~!B!Q*KvS=`ZXdJ0-)JZS2dMI zCO`7%(QW=O#HVuK+@M!f75Q+zq;_Vo;YFnHqJ`KyJK=qT+dV_CBVN1^yNd3$5FGw( zQ4?bELVV)zTpq#qryFk|c3X&tT&L3sP7ay9iP(K1mgdNU2yPA7c?*V5lhTcB^Mp7uF4PSuKd z8N&)2bpAZ`0ZGozXfYvP&ajaDP0rgr1Xq0(Wz(=eJ@2rl6z0(I}rymEcDssDvuzzp#M%c;x!BlOS$US zl}+#wi_cGp*D@@#K*em0l;BHV`MrpPIJQGcQ$i8JUDMa^BMxR*D8F-T){S6i+H4=< zbqovND)ZxlRDw+@2@ep<7#3b5sk^eC;P`>^0mO2Kg{ADTPKhCS@8+&S#32mJY%o=~ z@jrrfTKN#-^$hFU;GOYDbc2nbYkol-%CM{h%n3ayBzeqn^C9Bb8P;1NXW6-Mf;$d9 z9Y!3+u+9!OC1+{*AFtd!ig*LVGCTP0(H|noc3Wx6Ffd`_ychi z!#W2FjAtdA~=5L{2byp z85Wr#_@uaVmfnk{$)*8O7`yUPp_7(sD literal 0 HcmV?d00001 diff --git a/log/ss1/log_ss1_metis/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.2 b/log/ss1/log_ss1_metis/events.out.tfevents.1661996730.DESKTOP-D775MV0.4204.2 new file mode 100644 index 0000000000000000000000000000000000000000..057435be0e69c7b66e8a8485252b12a97e435d40 GIT binary patch literal 2738 zcmZYBeJm7k7{KvLIVao+3FSTNbdJkA%}d_4n`L@gyPKBs{&JU>2!E*cqLp5hURaCL zyXEDwvJ(+Tq8xdz7^RHlNbO|Rdidwr^ZWa=?|%Dj&$HjUWs9dcOJL62Ta{C)(%9#1 zsCghMEJkeWml`2XapRl*6~-AYIYZ5xI?St%i;lxq*HUN1OQLwm$@Y57>Pmf!xrK8Z zmgs)okOlMB>|ojdBzrl$VULH|&j2$hw%s`&gYd%m>ngx% z6kpLPs6#lvOIHS%NpV4fxfHf!% 
zsTn+rFjr#t01Z-jL$GArG4C1#Cj`Z@o+oDmMkV_X9SixKKD+fUxVp z$pOG-6jy6Svk~55(=iCxoMLwDMLa1-6U{yVwxF1s*!2{Z%TGx@0=A@B`t#*=gvZ({ zhX8M+*u6N?9pN|O^TU9xDE7P``v~D5`aUCottobl`?wO}lj3uufHzSr;LDC9Eb@8# z3Gil$v+fn*Yb0LQ83W9r*x7+2MP+4`=nG&QisKJ;o<{hUsOT%;Efm)t)VP3fq~F*$ zU@pa_lfwxJ*N^Zg0NYX=F|++U!XavhCjs*)u09iIfbds^>>FS^iW#}(uP_e!GX>b5 zV%3t^B808-yru#3Dc&xw#lM0+?-L5ZTPaRTw{AgYd3@^(U`L*K g{%%~pw!C>Bup7nJydEP|j>-320PIe&eg1gbKbt-bkpKVy literal 0 HcmV?d00001 diff --git a/log/test1/events.out.tfevents.1662009066.DESKTOP-D775MV0.15544.0 b/log/test1/events.out.tfevents.1662009066.DESKTOP-D775MV0.15544.0 new file mode 100644 index 0000000000000000000000000000000000000000..1f3ae9fa35e34e3a3d075abfd9c65a7e0cd1fd32 GIT binary patch literal 40 rcmb1OfPlsI-b$QdyW4j0AGzTu#hX-=n3<>NT9%quVr7&dp7Ra>)E*3u literal 0 HcmV?d00001 diff --git a/log/test1/events.out.tfevents.1662014877.DESKTOP-D775MV0.1092.0 b/log/test1/events.out.tfevents.1662014877.DESKTOP-D775MV0.1092.0 new file mode 100644 index 0000000000000000000000000000000000000000..da6577526cf9de4874f36fc3c01995c48b523108 GIT binary patch literal 40 rcmb1OfPlsI-b$P|7$VX|kKAyS;!P?_%*@ksElbTSu`*huR3`%f!<7qX literal 0 HcmV?d00001 diff --git a/log/test1/log_test1_cuts/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.1 b/log/test1/log_test1_cuts/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.1 new file mode 100644 index 0000000000000000000000000000000000000000..3a22e944297bf98904b280d9630afa5d5df012d2 GIT binary patch literal 1918 zcmZwGTSyad6vuH*D>pCMZqCaz%S#$)MTRLwfgYpvV42v1lGEl2b!javSQ-{TDew`cwkkF zsC#r*h=0Lx)lqFidWBl6OWB;(yrEw4-{mKcuM_mY3qThy5(WwdCC4<0I`s+NCaK%V z3une6{^NbhG`ZmX3t_u^G|+P?ZY?b8CH#l8tq1wCrFhP8b`0T;vW{2Cy_Vwh$)h_6 zH|};$A@^o{qZ#g9HHH#imTjI#zMS!MW@t=a=qCJ^y-Xl>pSptaHwIAK62b|;d}v!R z@|BG1k3fd4GLGseHb^Y;E}O2lkmyzTdBwejCvN$b zRzE=a&lQci$bA`KlLv#<`fS2WTf$0_`!W7q1@=x|3gLTZPgf!LXWT~xmQU7b!hIu) z4afr+_f|u$MUzGNNKe5{B@q-qqe(tgnzCLMh5^_1? 
zbryIsvQ5-oBNo{!zas}gO%`o@mft2v}XRd3=!x_&v!^MufHwYhd-8LbQ zV0?WmGYO=v_b_#rRDNn8))j5}p-e@{_uIk7nHYH-b1r9^oGE fKPr&NFrIG%$H_$@;r8^Z1mv-d3#~9D+JEg2q1ydg literal 0 HcmV?d00001 diff --git a/log/test1/log_test1_metis/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.2 b/log/test1/log_test1_metis/events.out.tfevents.1662012056.DESKTOP-D775MV0.15544.2 new file mode 100644 index 0000000000000000000000000000000000000000..77caa15bada517478882e82d5feb555cc6af6036 GIT binary patch literal 1918 zcmZwFTS(IZ6b5kD%=w?Uxw(tBIj2s;LNhc_DN5W3rz{h-Vv(0j#JM&d8$M_cLSjS( z8X;k;RVGGR3TgxirHGeqs51(U4KFE60&`7$D0cGY!1YaTQ6c4ZWz~jcm8H^P&OF>7t}T@9y3&;Iamqu?pTHzQbD50OwNNJNIM?@vVTnI^aPRugKnDBVOAj ztpy%T@id)Mig>QOp$T{h#o5PCpGQ1MQF8-$D8;p@tqR0v$I9;k52JYVLHi8irgl*; za300uHf_y_Z+qOsz{4r-(01k_e!C;|GjKk|(7qfM?9-Wv;jPV z;)>D(nTYqYqS+C?0ShVasBpC)zGAv004}2Vv4IC_#B&_a;(?1P-ru;oh4}vTyma6a ziocpYB12p}Hm?F6N%0p}uMKf^AFl*>6va#XDn<}@PY9}j@1uAyOOcOwt3_c2E~WU} zi8?Xj%LY>ma2dtrxBf~HAAIAv13a4IuYde%MSR|4?*=ZXxUjQWhqyA(GXy+_;;I>w z1@U2Z$2f2W#lMAH(-7Z$T=o@sEXCE?)v<_&IH%Wu$5H&}^G!42miOa3z~d<%V<|a< scwtm`u+X=6CB^+YMQe!rEw0LdCr~^;<0$_7q^cG?0P6JvTL1t6 literal 0 HcmV?d00001 diff --git a/losses.py b/losses.py new file mode 100644 index 0000000..4e8a8cc --- /dev/null +++ b/losses.py @@ -0,0 +1,17 @@ +import torch +import torch.nn as nn +from utils_file.utils import * + +def loss_normalized_cut(y_pred, graph): + y = y_pred + d = degree(graph.edge_index[0], num_nodes=y.size(0)) + gamma = y.t() @ d + c = torch.sum(y[graph.edge_index[0], 0] * y[graph.edge_index[1], 1]) + return torch.sum(torch.div(c, gamma)) + +def loss_embedding(x,L): + mse=nn.MSELoss() + l=torch.tensor(0.) 
+ for i in range(x.shape[1]): + l+=residual(x[:,i],L,mse) + return l diff --git a/models.py b/models.py new file mode 100644 index 0000000..0f318a2 --- /dev/null +++ b/models.py @@ -0,0 +1,129 @@ +import torch +import torch.nn as nn +from layers import * +from torch_geometric.nn import avg_pool, graclus +from torch_geometric.data import Batch +from layers import SAGEConv + + +# Neural network for the embedding module +class ModelSpectral(torch.nn.Module): + def __init__(self,se_params,device): + super(ModelSpectral,self).__init__() + self.l = se_params.get('l') + self.pre = se_params.get('pre') + self.post = se_params.get('post') + self.coarsening_threshold = se_params.get('coarsening_threshold') + self.activation = getattr(torch, se_params.get('activation')) + self.lins = se_params.get('lins') + + self.conv_post = nn.ModuleList( + [SAGEConv(self.l, self.l) for i in range(self.post)] + ) + self.conv_coarse = SAGEConv(2,self.l) + self.lins1=nn.Linear(self.l,self.lins[0]) + self.lins2=nn.Linear(self.lins[0],self.lins[1]) + self.lins3=nn.Linear(self.lins[1],self.lins[2]) + self.final=nn.Linear(self.lins[2],2) + self.device = device + + def forward(self, graph): + x, edge_index, batch = graph.x, graph.edge_index, graph.batch + unpool_info = [] + x_info=[] + cluster_info=[] + edge_info=[] + while x.size()[0] > self.coarsening_threshold: + cluster = graclus(edge_index,num_nodes=x.shape[0]) + cluster_info.append(cluster) + edge_info.append(edge_index) + gc = avg_pool(cluster, Batch(batch=batch, x=x, edge_index=edge_index)) + x, edge_index, batch = gc.x, gc.edge_index, gc.batch + # coarse iterations + x=torch.eye(2).to(self.device) + x=self.conv_coarse(x,edge_index) + x=self.activation(x) + while edge_info: + # un-pooling / interpolation / prolongation / refinement + edge_index = edge_info.pop() + output, inverse = torch.unique(cluster_info.pop(), return_inverse=True) + x = x[inverse] + # post-smoothing + for i in range(self.post): + x = 
self.activation(self.conv_post[i](x, edge_index)) + x=self.lins1(x) + x=self.activation(x) + x=self.lins2(x) + x=self.activation(x) + x=self.lins3(x) + x=self.activation(x) + x=self.final(x) + x,_=torch.linalg.qr(x,mode='reduced') + return x + +# Neural network for the partitioning module +class ModelPartitioning(torch.nn.Module): + def __init__(self,pe_params): + super(ModelPartitioning,self).__init__() + + self.l = pe_params.get('l') + self.pre = pe_params.get('pre') + self.post = pe_params.get('post') + self.coarsening_threshold = pe_params.get('coarsening_threshold') + self.activation = getattr(torch, pe_params.get('activation')) + self.lins = pe_params.get('lins') + + self.conv_first = SAGEConv(1, self.l) + self.conv_pre = nn.ModuleList( + [SAGEConv(self.l, self.l) for i in range(self.pre)] + ) + self.conv_post = nn.ModuleList( + [SAGEConv(self.l, self.l) for i in range(self.post)] + ) + self.conv_coarse = SAGEConv(self.l,self.l) + + self.lins1=nn.Linear(self.l,self.lins[0]) + self.lins2=nn.Linear(self.lins[0],self.lins[1]) + self.lins3=nn.Linear(self.lins[1],self.lins[2]) + self.final=nn.Linear(self.lins[4],2) + + def forward(self, graph): + x, edge_index, batch = graph.x, graph.edge_index, graph.batch + x = self.activation(self.conv_first(x, edge_index)) + unpool_info = [] + x_info=[] + cluster_info=[] + edge_info=[] + batches=[] + while x.size()[0] > self.coarsening_threshold: + # pre-smoothing + for i in range(self.pre): + x = self.activation(self.conv_pre[i](x, edge_index)) + # pooling / coarsening / restriction + x_info.append(x) + batches.append(batch) + cluster = graclus(edge_index,num_nodes=x.shape[0]) + cluster_info.append(cluster) + edge_info.append(edge_index) + gc = avg_pool(cluster, Batch(batch=batch, x=x, edge_index=edge_index)) + x, edge_index, batch = gc.x, gc.edge_index, gc.batch + # coarse iterations + x = self.activation(self.conv_coarse(x,edge_index)) + while edge_info: + # un-pooling / interpolation / prolongation / refinement + 
edge_index = edge_info.pop() + output, inverse = torch.unique(cluster_info.pop(), return_inverse=True) + x = (x[inverse] + x_info.pop())/2 + # post-smoothing + for i in range(self.post): + x = self.activation(self.conv_post[i](x, edge_index)) + x=self.lins1(x) + x=self.activation(x) + x=self.lins2(x) + x=self.activation(x) + x=self.lins3(x) + x=self.activation(x) + x=self.final(x) + x=torch.softmax(x,dim=1) + return x + diff --git a/test.py b/test.py new file mode 100644 index 0000000..b50b936 --- /dev/null +++ b/test.py @@ -0,0 +1,117 @@ +import hashlib +import os +import pickle +from urllib.request import urlretrieve +import numpy as np +from PIL import Image +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import LabelBinarizer +from sklearn.utils import resample +from tqdm import tqdm +from zipfile import ZipFile +print('All modules imported.') + +def download(url, file): + + if not os.path.isfile(file): + print('Downloading ' + file + '...') + urlretrieve(url, file) + print('Download Finished') + +download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip') +download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip') + +assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\ +'notMNIST_train.zip file is corrupted. Remove the file and try again.' +assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\ +'notMNIST_test.zip file is corrupted. Remove the file and try again.' 
def normalize_grayscale(image_data):
    """Linearly map 8-bit grayscale values from [0, 255] into [0.1, 0.9].

    Works element-wise on scalars or NumPy arrays.
    """
    out_min, out_max = 0.1, 0.9
    in_min, in_max = 0, 255
    span = out_max - out_min
    return out_min + (image_data - in_min) * span / (in_max - in_min)
train_features, + train_labels, + test_size=0.05, + random_state=832289) + +pickle_file = 'notMNIST.pickle' + +# 保存数据⽅便调⽤ +pickle_file = 'notMNIST.pickle' +if not os.path.isfile(pickle_file): #判断是否存在此⽂件,若⽆则存储 + print('Saving data to pickle file...') + try: + with open('notMNIST.pickle', 'wb') as pfile: + pickle.dump( + { + 'train_dataset': train_features, + 'train_labels': train_labels, + 'valid_dataset': valid_features, + 'valid_labels': valid_labels, + 'test_dataset': test_features, + 'test_labels': test_labels, + }, + pfile, pickle.HIGHEST_PROTOCOL) + except Exception as e: + print('Unable to save data to', pickle_file, ':', e) + raise +print('Data cached in pickle file.') diff --git a/test_pickle.py b/test_pickle.py new file mode 100644 index 0000000..3be57d1 --- /dev/null +++ b/test_pickle.py @@ -0,0 +1,63 @@ +import pickle +import math + +import numpy as np +from tqdm import tqdm +import matplotlib.pyplot as plt + +import numpy as np +import networkx as nx +import scipy +from layers import * +from config import config_gap +from losses import * +import random +import torch +import losses +from models import * +from utils_file.utils import * +from torch_geometric.loader import DataLoader +from torch_geometric.data import Data +from torch.utils.tensorboard import SummaryWriter +import os +from utils_file.dataset_utils import mixed_dataset + + +# Seeds +torch.manual_seed(176364) +np.random.seed(453658) +random.seed(41884) +torch.cuda.manual_seed(9597121) + +# config = config_gap(data_path='data/suitesparse',batch_size=1,mode='train') + +# print(config.dataset) + +# pickle_path = 'ss1.pickle' +# # with open(pickle_path,'wb') as f: +# # pickle.dump(config.loader, f,pickle.HIGHEST_PROTOCOL) + +# with open(pickle_path,'rb') as f: +# pickle_data = pickle.load(f) +# print(pickle_data) + + +# for d in pickle_data: +# print(d) + +# def to_pickle(config.loader): +# print(os.path.isfile('pickle_file/'+'ss1'+'.pickle')) + +# 
def train(config):
    """Train the spectral-embedding module and/or the partitioning module.

    Relies on a module-level ``writer`` (SummaryWriter) which the
    ``__main__`` section of this file creates before calling ``train``.
    """
    device = config.device
    # spectral for graph embedding
    if config.mode == 'train' and config.model == "spectral for graph embedding":
        if config.is_se:
            # --- spectral embedding module ---
            f = ModelSpectral(config.se_params, device).to(device)
            f.train()
            print('Number of parameters:', sum(p.numel() for p in f.parameters()))
            print('')
            optimizer = torch.optim.Adam(f.parameters(), **config.se_opt)
            loss_fn = loss_embedding
            print('Start spectral embedding module')
            print(' ')
            for i in range(config.se_epoch):
                for d in config.loader:
                    d = d.to(device)
                    L = laplacian(d).to(device)
                    x = f(d)
                    loss = loss_fn(x, L)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                print('Epoch:', i, ' Loss:', loss)
            print('End training')
            print('')

            # Save the model
            torch.save(f.state_dict(), config.se_savepath)
            print('Model saved')
            print('')

        if config.is_pe:
            # --- partitioning module, trained on frozen spectral embeddings ---
            print('Start partitioning embedding module')
            f = ModelSpectral(config.se_params, device).to(device)
            f.load_state_dict(torch.load(config.se_savepath))
            f.eval()
            for p in f.parameters():
                p.requires_grad = False
            dataset = []
            for d in config.loader:
                d = d.to(device)
                L = laplacian(d).to(device)
                x = f(d)
                # Drop the first column of the embedding (trivial eigenvector).
                x = x[:, 1:]
                x = (x - torch.mean(x)) * \
                    torch.sqrt(torch.tensor(x.shape[0]))
                dataset.append(Data(x=x, edge_index=d.edge_index))
            loader = DataLoader(dataset, batch_size=1, shuffle=False)

            f_lap = ModelPartitioning(config.pe_params)
            f_lap.train()
            loss_fn = loss_normalized_cut
            optimizer = torch.optim.Adam(f_lap.parameters(), **config.pe_opt)
            # Smallest cut seen so far; used as the checkpointing criterion.
            max_cut = 1000000.
            for i in range(config.pe_epoch):
                for d in loader:
                    d = d.to(device)
                    data = f_lap(d)
                    _, _, _, cuts, t, ia, ib, ic, id_ = best_part(data, d, 2)
                    writer.add_scalars(config.plot_path,
                                       {'cuts': cuts, 'metis': config.baseline}, i)
                    print('cut: ', cuts)
                    print('ia: ', len(ia))
                    print('ib: ', len(ib))
                    loss = loss_fn(data, d)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                print('Epoch:', i, ' Loss:', loss)
                # Checkpoint when the cut improves and the partition stays
                # reasonably balanced.
                # NOTE(review): the original condition was garbled in the
                # patch ("cuts0.85"); reconstructed as a cut-improvement check
                # plus a 0.85 balance threshold -- confirm against intent.
                if int(cuts) != 0 and cuts < max_cut and \
                        min(len(ia), len(ib)) / max(len(ia), len(ib)) > 0.85:
                    max_cut = cuts
                    torch.save(f_lap.state_dict(), config.pe_savepath)
                    print('Model saved')
                    print('')
f_lap.parameters(): + p.requires_grad = False + f_lap.eval() + dataset = [] + for d in config.loader: + d = d.to(device) + L = laplacian(d).to(device) + x = f(d) + x = x[:,1:] + x = (x-torch.mean(x)) * \ + torch.sqrt(torch.tensor(x.shape[0])) + dataset.append(Data(x=x,edge_index=d.edge_index)) + loader = DataLoader(dataset,batch_size=1,shuffle=False) + for d in loader: + d = d.to(device) + data = f_lap(d) + _,_,_,cuts,t,ia,ib,ic,id = best_part(data,d,2) + print('cut: ',cuts) + print('ia: ',max(len(ia),len(ib))) + print('ib: ',min(len(ia),len(ib))) + print('balance: ',max(len(ia),len(ib))/min(len(ia),len(ib))) + + + +if __name__ == '__main__': + # Seeds + torch.manual_seed(176364) + np.random.seed(453658) + random.seed(41884) + torch.cuda.manual_seed(9597121) + + # data: all or only one + # data = 'all' + data = 'nv1' + + # 删除文件,用断点删除 , mode = 'all in folder','file','folder' + remove_file_or_folder('log/'+data,mode='all in folder') + + # config + config = config_gap(data=data,batch_size=1,mode='train') + config.data = data + config.is_plot = True + config.plot_path = 'log/'+config.data+'/' + config.baseline,config.balance = pymetis_baseline(config.dataset[0]) + print('data: ',config.dataset[0]) + print('') + print('The number of nodes: ',config.dataset[0].storage._sparse_sizes[0]) + print('') + print('The number of edges: ',int(config.dataset[0].storage._row.shape[0]/2)) + print('') + print('metis cuts: ',config.baseline) + print('metis balance: ',config.balance) + config.model = 'spectral for graph embedding' + # spectral embedding optimizer == se_opt(dict)(lr,weight_decay) + config.se_opt = {'lr':0.001,'weight_decay':5e-6} + # partitioning embdding optimizer == pm_opt(dict)(lr,weight_decay) + config.pe_opt = {'lr':0.001,'weight_decay':5e-6} + # whether to run spectral embedding + config.is_se = True + # whether to run partitiong embedding + config.is_pe = True + config.se_params = 
def _adj_from_mtx(path, binary_weight):
    """Load a .mtx file as a symmetric SparseTensor without self-loops.

    Raises for non-connected graphs, matching the original behaviour.
    """
    adj = mmread(os.path.expanduser(path))
    adj = torch_sparse.remove_diag(st.from_scipy(adj)).to_symmetric()
    adj = to_continuous_natural_number(adj)
    g = nx.Graph(st.to_scipy(adj, layout='coo'))
    if not nx.is_connected(g):
        # NOTE(review): message kept verbatim, but strictly this means the
        # graph is not connected, not only that it has isolated nodes.
        raise Exception('have isolated nodes')
    if binary_weight:
        # Replace edge weights with 1s so only the topology matters.
        adj = st.set_value(adj, torch.ones_like(adj.storage._value), layout='coo')
    return adj


def _to_loader(dataset_list, batch_size):
    """Wrap each adjacency in a PyG Data (random 2-d node features) and batch."""
    dataset = []
    for adj in dataset_list:
        coo = st.to_scipy(adj, layout='coo')
        edges = torch.tensor(np.array([coo.row, coo.col]), dtype=torch.long)
        nodes = torch.randn(coo.shape[0], 2)
        dataset.append(Data(x=nodes, edge_index=edges))
    return DataLoader(dataset, batch_size=batch_size, shuffle=False)


def mixed_dataset(data='all', binary_weight=True, batch_size=1):
    """Build the SuiteSparse dataset.

    data='all' loads every matrix under data/suitesparse and returns
    (dataset_list, loader); any other value loads (and pickles) a single
    matrix and returns (loader, dataset_list).

    NOTE(review): the two branches return the pair in OPPOSITE order; this
    looks like a bug, but both orders are preserved here because callers of
    each branch already depend on them (test_pickle.py unpacks the
    single-graph branch as ``loader, dataset``). Unify once all callers are
    audited.
    """
    if data == 'all':
        dataset_list = [
            _adj_from_mtx('data/suitesparse' + '/' + str(m), binary_weight)
            for m in os.listdir(os.path.expanduser('data/suitesparse' + '/'))
        ]
        loader = _to_loader(dataset_list, batch_size)
        return dataset_list, loader

    pickle_path = 'pickle_file/' + data + '.pickle'
    # Cached result: (loader, dataset_list) exactly as pickled below.
    if os.path.isfile(pickle_path):
        with open(pickle_path, 'rb') as fh:
            cached = pickle.load(fh)
        return cached[0], cached[1]

    dataset_list = [_adj_from_mtx('data/suitesparse' + '/' + data + '.mtx',
                                  binary_weight)]
    loader = _to_loader(dataset_list, batch_size)
    with open(pickle_path, 'wb') as fh:
        pickle.dump([loader, dataset_list], fh)
    return loader, dataset_list
def input_matrix():
    """Return a small symmetric SciPy COO adjacency matrix for testing.

    7 nodes, 9 undirected edges stored as 18 directed entries of weight 1.
    """
    n_nodes = 7
    n_undirected_edges = 9
    rows = np.array([0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6])
    cols = np.array([2, 3, 4, 6, 0, 4, 5, 6, 0, 4, 5, 1, 2, 3, 2, 3, 1, 2])
    weights = np.ones(2 * n_undirected_edges)

    return sp.coo_matrix((weights, (rows, cols)), shape=(n_nodes, n_nodes))
def abs_SparseTensor(torch_sparse_mx):
    """Return *torch_sparse_mx* with element-wise absolute values.

    Bug fix: the original body referenced an undefined global ``A`` instead
    of the parameter, so every call raised NameError.

    Returns:
        SparseTensor
    """
    return st.set_value(torch_sparse_mx,
                        torch.abs(torch_sparse_mx.storage._value))  # SparseTensor
def from_adjacency_matrix_to_adjacency_list(adjacency_matrix):
    """Convert a torch_sparse adjacency matrix into an adjacency list.

    The result is the input format expected by ``pymetis.part_graph``:
    one neighbour list per node, in first-appearance order of the row
    indices (assumes the COO rows cover every node and are sorted --
    TODO confirm for all inputs).

    Example::

        n_cuts, membership = pymetis.part_graph(2, adjacency=adjacency_list)
        nodes_part_0 = np.argwhere(np.array(membership) == 0).ravel()
        nodes_part_1 = np.argwhere(np.array(membership) == 1).ravel()

    Args:
        adjacency_matrix: torch_sparse SparseTensor

    Returns:
        list of neighbour lists
    """
    rows = adjacency_matrix.storage._row.tolist()
    cols = adjacency_matrix.storage._col.tolist()

    neighbours = {}
    for r, c in zip(rows, cols):
        neighbours.setdefault(r, []).append(c)

    return list(neighbours.values())
def best_part(data, graph, n_times):
    """Evaluate the partition encoded in ``data`` over ``n_times`` rounds
    and return the round with the smallest normalized cut.

    NOTE(review): every round recomputes the same argmax of ``data``, so the
    rounds are identical; the loop seems to exist only for timing.

    Args:
        data: per-node partition probabilities (N x 2 tensor).
        graph: the PyG graph the probabilities refer to.
        n_times: number of evaluation rounds (>= 1).

    Returns:
        (best normalized cut, (volA, volB), predictions, cut,
         elapsed time, ia, ib, ic, id) where ia/ib are the first round's
        partitions and ic/id the last round's.
    """
    ncuts = []
    vols = []
    preds = []
    cuts = []
    t0 = timeit.default_timer()
    graph_ev = data
    t1 = timeit.default_timer() - t0
    predictions = torch.argmax(graph_ev, dim=1)
    graph_pred = torch_from_preds(graph, predictions)

    # vola: total degree of partition a
    # volb: total degree of the complement of a
    # cut:  edges between a and b
    nc_gap, vola, volb, cut, ia, ib = normalized_cut(graph_pred)
    ncuts.append(nc_gap)
    cuts.append(cut)
    vols.append((vola, volb))
    preds.append(predictions)

    # Bug fix: ic, id and t1_loop were undefined when n_times <= 1,
    # raising NameError at the return statement. Default them to the
    # first round's results / zero time.
    ic, id_ = ia, ib
    t1_loop = 0.0
    for i in range(1, n_times):
        t0_loop = timeit.default_timer()
        graph_ev = data
        t1_loop = timeit.default_timer() - t0_loop
        predictions = torch.argmax(graph_ev, dim=1)
        graph_pred = torch_from_preds(graph, predictions)
        nc_gap, vola, volb, cut, ic, id_ = normalized_cut(graph_pred)
        ncuts.append(nc_gap)
        vols.append((vola, volb))
        preds.append(predictions)
        cuts.append(cut)
    min_nc = np.argmin(ncuts)
    return (ncuts[min_nc], vols[min_nc], preds[min_nc], cuts[min_nc],
            t1 + t1_loop, ia, ib, ic, id_)
def remove_file_or_folder(path, mode='file'):
    """Best-effort deletion of *path*; failures are printed, never raised.

    mode: 'file' (os.remove) | 'folder' (os.rmdir, empty dirs only)
          | 'all in folder' (shutil.rmtree, recursive).
    Any other mode prints 'no mode' and does nothing.
    """
    actions = {
        'file': ('delete file', os.remove),
        'folder': ('delete empty folder', os.rmdir),
        'all in folder': ('delete all in folder', shutil.rmtree),
    }
    if mode not in actions:
        print('no mode')
        return
    message, action = actions[mode]
    print(message)
    try:
        action(path)
    except Exception as result:
        # Deliberately swallow errors (e.g. missing path) like the callers expect.
        print(str(result)+' , '+'Deleting fails')