
Python code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class HGNN(nn.Module):
    def __init__(self, H, in_size, out_size, hidden_dims=128):
        # Note: the original default was `hidden_dims=hidden_dim`, which relied on an
        # external variable; 128 is only a placeholder default.
        super().__init__()

        self.Theta1 = nn.Linear(in_size, hidden_dims)
        self.Theta2 = nn.Linear(hidden_dims, out_size)
        self.dropout = nn.Dropout(0)  # p=0 disables dropout

        # Node degrees: number of hyperedges each node belongs to
        d_V = H.sum(1).to_dense().double()
        # Edge degrees: number of nodes in each hyperedge
        d_E = H.sum(0).to_dense().double()
        n_edges = d_E.shape[0]

        # D_V ** (-1/2)
        D_V_invsqrt = torch.diag(torch.pow(d_V, -0.5))
        # D_E ** (-1)
        D_E_inv = torch.diag(1.0 / d_E)
        # W: identity hyperedge weights
        W = torch.eye(n_edges).double()

        # Normalized hypergraph Laplacian:
        # L = D_V^(-1/2) H W D_E^(-1) H^T D_V^(-1/2)
        self.laplacian = (
            D_V_invsqrt @ H.double() @ W @ D_E_inv @ H.T.double() @ D_V_invsqrt
        )

    def forward(self, X):
        dtype = self.Theta1.weight.dtype
        lap = self.laplacian.to(dtype)

        # First hypergraph convolution: L (X Theta1), then ReLU
        X = lap @ self.dropout(self.Theta1(X))
        X = F.relu(X)

        # Second hypergraph convolution: L (X Theta2)
        X = lap @ self.dropout(self.Theta2(X))

        # Add an activation function here if needed, e.g.:
        # X = torch.sigmoid(X)
        return X

    def compute_node_representations(self, X):
        return self.forward(X)
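
For context, here is a minimal usage sketch of the class above; the toy incidence matrix, feature sizes, and tensor values are made-up placeholders and are not taken from the question:

import torch

# Hypothetical toy hypergraph: 4 nodes, 3 hyperedges.
# H[i, j] = 1 if node i belongs to hyperedge j.
H = torch.tensor([
    [1., 0., 1.],
    [1., 1., 0.],
    [0., 1., 1.],
    [0., 0., 1.],
])

X = torch.randn(4, 8)  # 4 nodes, 8 input features
model = HGNN(H, in_size=8, out_size=2, hidden_dims=16)
out = model.compute_node_representations(X)
print(out.shape)  # torch.Size([4, 2])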
