-
Notifications
You must be signed in to change notification settings - Fork 0
/
GCN.py
73 lines (70 loc) · 4.21 KB
/
GCN.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import torch
from torch import nn
# Undirected graph convolution.
# Trainable with gradient descent (any of PyTorch's optimizers works).
# Uses the spatial (vertex-domain) convolution formulation.
# Only the node features are required as input.
# As you can see, graph convolution has limitations (e.g. the number of nodes is fixed),
# so during convolution it can only learn the relations among the fixed N nodes.
# node_features holds N nodes, each carrying N_features features.
class GCN(nn.Module):
    """Undirected graph convolution over a fixed set of nodes.

    Learns a dense (node_count x node_count) edge-weight matrix and, in
    the forward pass, multiplies the node features by its symmetrized
    form (W + W^T) / 2, so the learned adjacency is undirected.

    Args:
        node_count: fixed number of nodes N in the graph.
        alpha: negative slope of the LeakyReLU output activation.
    """

    def __init__(self, node_count: int, alpha: float):
        # Inherit nn.Module so the parameter is registered (visible to
        # optimizers via .parameters()) and the module is callable.
        super().__init__()
        self.__node_count = node_count
        # Learnable pairwise edge weights between the N fixed nodes.
        self.__W_edge = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__W_edge, gain=1.414)
        self.__alpha = alpha
        # Bug fix: original passed ``self.alpha``, which does not exist
        # (the attribute is name-mangled to _GCN__alpha) and crashed here.
        self.__leakyrelu = nn.LeakyReLU(alpha)

    def forward(self, node_features):
        """Apply the symmetric edge mixing followed by LeakyReLU.

        NOTE(review): for this matmul to be valid, node_features must
        have node_count columns (features-in-rows layout) — confirm
        against callers.
        """
        sym_edges = torch.div(self.__W_edge + torch.transpose(self.__W_edge, 0, 1), 2)
        return self.__leakyrelu(torch.mm(node_features, sym_edges))
# Undirected graph convolution (feature-compressing variant).
# This GCN can expand or compress the node features (an identity-sized
# projection can be used as a residual block).
# Trainable with gradient descent (any of PyTorch's optimizers works).
# Uses the spatial (vertex-domain) convolution formulation.
# Only the node features are required as input.
# As you can see, graph convolution has limitations (e.g. the number of nodes is fixed),
# so during convolution it can only learn the relations among the fixed N nodes.
# node_features holds N nodes, each carrying N_features features.
class GCN_CompressFeatures(nn.Module):
    """Undirected graph convolution that also projects the per-node
    feature dimension from input_feature_N to output_feature_N
    (an equal-size projection can serve as a residual block).

    Args:
        node_count: fixed number of nodes N in the graph.
        input_feature_N: incoming feature dimension.
        output_feature_N: outgoing feature dimension.
        alpha: negative slope of the LeakyReLU output activation.
    """

    def __init__(self, node_count: int, input_feature_N: int, output_feature_N: int, alpha: float):
        # Inherit nn.Module so parameters register and the module is callable.
        super().__init__()
        self.__node_count = node_count
        self.__feature_compress_activition = nn.ELU(0.3)
        # Stored as (output, input) so forward can left-multiply the
        # (input_feature_N, node_count) feature matrix directly; the
        # original (input, output) shape made the matmul dimensionally
        # invalid.
        self.__W_feature_compress = nn.Parameter(torch.zeros(size=(output_feature_N, input_feature_N)))
        # Bug fix: the two xavier_uniform_ calls were missing ``self.`` (NameError).
        nn.init.xavier_uniform_(self.__W_feature_compress, gain=1.414)
        self.__W_edge = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__W_edge, gain=1.414)
        self.__alpha = alpha
        # Bug fix: original passed ``self.alpha`` (name-mangled attribute; crashed).
        self.__leakyrelu = nn.LeakyReLU(alpha)

    def forward(self, node_features):
        """Project features, then mix nodes with the symmetric edge matrix.

        NOTE(review): node_features is expected as
        (input_feature_N, node_count) — features-in-rows layout, matching
        the sibling GCN class; confirm against callers.
        """
        feature_compress = self.__feature_compress_activition(
            torch.mm(self.__W_feature_compress, node_features))
        sym_edges = torch.div(self.__W_edge + torch.transpose(self.__W_edge, 0, 1), 2)
        return self.__leakyrelu(torch.mm(feature_compress, sym_edges))
# Directed graph convolution.
class GDCN(nn.Module):
    """Directed graph convolution over a fixed set of nodes.

    Instead of symmetrizing the edge matrix, it learns elementwise
    mixing gates A and B for W and W^T respectively, so forward and
    reverse edge directions are weighted independently.

    Args:
        node_count: fixed number of nodes N in the graph.
        alpha: negative slope of the LeakyReLU output activation.
    """

    def __init__(self, node_count: int, alpha: float):
        # Inherit nn.Module so parameters register and the module is callable.
        super().__init__()
        self.__node_count = node_count
        self.__W_edge = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__W_edge, gain=1.414)
        self.__alpha = alpha
        # Bug fix: original passed ``self.alpha`` (name-mangled attribute; crashed).
        self.__leakyrelu = nn.LeakyReLU(alpha)
        # Elementwise gates for the forward (A) and transposed (B) edge weights.
        self.__A = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__A, gain=1.414)
        self.__B = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__B, gain=1.414)

    def forward(self, node_features):
        """Mix nodes with the gated directed edge matrix, then LeakyReLU.

        NOTE(review): node_features must have node_count columns
        (features-in-rows layout) for this matmul — confirm with callers.
        """
        mixed = self.__A * self.__W_edge + self.__B * torch.transpose(self.__W_edge, 0, 1)
        return self.__leakyrelu(torch.mm(node_features, mixed))
# Directed graph convolution (feature-compressing variant).
# This GCN can expand or compress the node features (an identity-sized
# projection can be used as a residual block).
class GDCN_CompressFeatures(nn.Module):
    """Directed graph convolution that also projects the per-node
    feature dimension from input_feature_N to output_feature_N
    (an equal-size projection can serve as a residual block).

    Like GDCN, it learns elementwise gates A and B for W and W^T so the
    two edge directions are weighted independently.  Note: unlike
    GCN_CompressFeatures, the original applied no activation after the
    feature projection; that behavior is preserved.

    Args:
        node_count: fixed number of nodes N in the graph.
        alpha: negative slope of the LeakyReLU output activation.
        input_feature_N: incoming feature dimension.
        output_feature_N: outgoing feature dimension.
    """

    def __init__(self, node_count: int, alpha: float, input_feature_N: int, output_feature_N: int):
        # Inherit nn.Module so parameters register and the module is callable.
        super().__init__()
        self.__node_count = node_count
        self.__W_edge = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        # Stored as (output, input) so forward can left-multiply the
        # (input_feature_N, node_count) feature matrix directly; the
        # original (input, output) shape made the matmul dimensionally
        # invalid.
        self.__W_feature_compress = nn.Parameter(torch.zeros(size=(output_feature_N, input_feature_N)))
        # Bug fix: this xavier_uniform_ call was missing ``self.`` (NameError).
        nn.init.xavier_uniform_(self.__W_feature_compress, gain=1.414)
        nn.init.xavier_uniform_(self.__W_edge, gain=1.414)
        self.__alpha = alpha
        # Bug fix: original passed ``self.alpha`` (name-mangled attribute; crashed).
        self.__leakyrelu = nn.LeakyReLU(alpha)
        # Elementwise gates for the forward (A) and transposed (B) edge weights.
        self.__A = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__A, gain=1.414)
        self.__B = nn.Parameter(torch.zeros(size=(node_count, node_count)))
        nn.init.xavier_uniform_(self.__B, gain=1.414)

    def forward(self, node_features):
        """Project features, mix nodes with the gated directed edges, LeakyReLU.

        NOTE(review): node_features is expected as
        (input_feature_N, node_count) — features-in-rows layout, matching
        the sibling classes; confirm against callers.
        """
        feature_compress = torch.mm(self.__W_feature_compress, node_features)
        mixed = self.__A * self.__W_edge + self.__B * torch.transpose(self.__W_edge, 0, 1)
        return self.__leakyrelu(torch.mm(feature_compress, mixed))