import torch
from torch.nn import Parameter
from torch_geometric.nn.conv import MessagePassing
from ..inits import reset, uniform


class NNConv(MessagePassing):
r"""The continuous kernel-based convolutional operator from the
`"Neural Message Passing for Quantum Chemistry"
<https://arxiv.org/abs/1704.01212>`_ paper.
This convolution is also known as the edge-conditioned convolution from the
`"Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on
Graphs" <https://arxiv.org/abs/1704.02901>`_ paper (see
    :class:`torch_geometric.nn.conv.ECConv` for an alias):

    .. math::
        \mathbf{x}^{\prime}_i = \mathbf{\Theta} \mathbf{x}_i +
        \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \cdot
        h_{\mathbf{\Theta}}(\mathbf{e}_{i,j}),

    where :math:`h_{\mathbf{\Theta}}` denotes a neural network, *i.e.*
    an MLP.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        nn (torch.nn.Module): A neural network :math:`h_{\mathbf{\Theta}}`
            that maps edge features :obj:`edge_attr` of shape
            :obj:`[-1, num_edge_features]` to shape
            :obj:`[-1, in_channels * out_channels]`, *e.g.*, defined by
            :class:`torch.nn.Sequential`.
        aggr (string, optional): The aggregation scheme to use
            (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
            (default: :obj:`"add"`)
        root_weight (bool, optional): If set to :obj:`False`, the layer will
            not add the transformed root node features to the output.
            (default: :obj:`True`)
        bias (bool, optional): If set to :obj:`False`, the layer will not
            learn an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
"""

    def __init__(self,
in_channels,
out_channels,
nn,
aggr='add',
root_weight=True,
bias=True,
**kwargs):
super(NNConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.nn = nn
self.aggr = aggr
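        # `root` corresponds to \Theta in the formula above: a learnable
        # weight applied to each node's own features in `update`.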
if root_weight:
self.root = Parameter(torch.Tensor(in_channels, out_channels))
else:
self.register_parameter('root', None)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()

    def reset_parameters(self):
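        # `reset` re-initializes the layers of the edge network, while
        # `uniform` draws `root` and `bias` from
        # U(-1/sqrt(in_channels), 1/sqrt(in_channels)).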
reset(self.nn)
uniform(self.in_channels, self.root)
uniform(self.in_channels, self.bias)

    def forward(self, x, edge_index, edge_attr):
""""""
x = x.unsqueeze(-1) if x.dim() == 1 else x
pseudo = edge_attr.unsqueeze(-1) if edge_attr.dim() == 1 else edge_attr
return self.propagate(edge_index, x=x, pseudo=pseudo)

    def message(self, x_j, pseudo):
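        # The edge network h_\Theta maps each edge feature vector to a
        # flattened [in_channels, out_channels] weight matrix; the batched
        # matmul then transforms each neighbor feature x_j by its
        # edge-specific matrix.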
weight = self.nn(pseudo).view(-1, self.in_channels, self.out_channels)
return torch.matmul(x_j.unsqueeze(1), weight).squeeze(1)

    def update(self, aggr_out, x):
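        # Add the transformed root (self) node features and the learned
        # bias, if those parameters were registered in __init__.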
if self.root is not None:
aggr_out = aggr_out + torch.mm(x, self.root)
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out

    def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)


ECConv = NNConv
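

# A minimal usage sketch, not part of the original module: the toy graph
# below, its feature sizes, and the two-layer edge network are illustrative
# assumptions. Note that the relative import at the top of this file means
# the module must be run from within the package for this to execute.
if __name__ == '__main__':
    from torch.nn import Linear, ReLU, Sequential

    num_nodes, in_channels, out_channels = 4, 16, 32

    x = torch.randn(num_nodes, in_channels)         # node features [N, 16]
    edge_index = torch.tensor([[0, 1, 2, 3],        # source nodes
                               [1, 2, 3, 0]])       # target nodes
    edge_attr = torch.randn(edge_index.size(1), 2)  # edge features [E, 2]

    # h_\Theta: maps each 2-dim edge feature to a flattened 16 * 32 weight
    # matrix, as required by the `nn` argument.
    edge_nn = Sequential(Linear(2, 25), ReLU(),
                         Linear(25, in_channels * out_channels))

    conv = NNConv(in_channels, out_channels, edge_nn, aggr='mean')
    out = conv(x, edge_index, edge_attr)
    print(out.size())  # expected: torch.Size([4, 32])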