Neat work! I was looking at the implementation of the Laplacian positional encoding, and some things weren't clear.
def laplacian_positional_encoding(g, pos_enc_dim):
    """Attach Laplacian positional encodings to the nodes of ``g``.

    Builds the symmetric normalized Laplacian ``L = I - D^{-1/2} A D^{-1/2}``,
    computes its eigendecomposition, and stores the ``pos_enc_dim``
    eigenvectors of smallest non-trivial eigenvalue as the node feature
    ``'lap_pos_enc'``. Column 0 (eigenvalue ~0) is skipped because it is
    constant on each connected component and therefore carries no
    positional information.

    Args:
        g: a DGL graph.
        pos_enc_dim: number of eigenvector columns to keep.

    Returns:
        The same graph ``g``, with ``g.ndata['lap_pos_enc']`` of shape
        ``(num_nodes, pos_enc_dim)`` added (assuming the graph has more
        than ``pos_enc_dim`` nodes).
    """
    # Symmetric normalized Laplacian. Degrees are clipped at 1 so isolated
    # nodes do not cause a division by zero.
    A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    L = sp.eye(g.number_of_nodes()) - N * A * N

    # L is symmetric, so use eigh instead of eig: eigh is faster, returns
    # real eigenpairs (eig may leak tiny imaginary rounding noise), and
    # guarantees eigenvalues in ascending order — no manual argsort or
    # np.real() cleanup needed.
    EigVal, EigVec = np.linalg.eigh(L.toarray())

    # Keep eigenvectors 1..pos_enc_dim (drop the trivial first one).
    # NOTE(review): eigenvector signs are arbitrary; downstream code should
    # not rely on a fixed sign (sign-flip augmentation is common here).
    g.ndata['lap_pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1]).float()
    return g
Why do you drop the first eigenvector in the last line (i.e. why do you use indices 1:pos_enc_dim+1)? Does this come from the assumption that the first eigenvalue will be very close to 0?
g.ndata['lap_pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()
EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])