G. Serra and M. Niepert. L2XGNN: Learning to Explain Graph Neural Networks. Proceedings of the European Conference on Machine Learning (ECML 2024), (2024)
Abstract
Graph Neural Networks (GNNs) are a popular class of machine learning models. Inspired by the learning to explain (L2X) paradigm, we propose L2XGNN, a framework for explainable GNNs that provides faithful explanations by design. L2XGNN learns a mechanism for selecting explanatory subgraphs (motifs), which are exclusively used in the GNN message-passing operations. L2XGNN can select, for each input graph, a subgraph with specific properties, such as being sparse and connected. Imposing such constraints on the motifs often leads to more interpretable and effective explanations. Experiments on several datasets suggest that L2XGNN achieves the same classification accuracy as baseline methods using the entire input graph while ensuring that only the provided explanations are used to make predictions. Moreover, we show that L2XGNN can identify motifs responsible for the graph's properties it is intended to predict.
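To make the idea in the abstract more concrete, the Python sketch below illustrates the general pattern of learning edge scores, keeping only a sparse subset of edges, and running message passing exclusively over that subset, so the retained subgraph is the explanation actually used for the prediction. This is not the authors' implementation: the paper samples sparse, connected motifs, whereas this toy version uses a deterministic top-k mask with a straight-through estimator, and all names (EdgeSelectingGNN, keep_ratio, etc.) are invented for illustration.

# Illustrative sketch only (assumption: not the L2XGNN code). Learns per-edge
# scores, keeps a sparse top-k subset, and aggregates messages only over the
# selected edges, so the kept subgraph is the explanation used for prediction.
import torch
import torch.nn as nn


class EdgeSelectingGNN(nn.Module):
    def __init__(self, in_dim, hid_dim, num_classes, keep_ratio=0.5):
        super().__init__()
        self.keep_ratio = keep_ratio
        # Scores each directed edge (i, j) from the endpoint features.
        self.edge_scorer = nn.Sequential(
            nn.Linear(2 * in_dim, hid_dim), nn.ReLU(), nn.Linear(hid_dim, 1)
        )
        self.msg = nn.Linear(in_dim, hid_dim)      # message transformation
        self.readout = nn.Linear(hid_dim, num_classes)

    def forward(self, x, edge_index):
        # x: [num_nodes, in_dim], edge_index: [2, num_edges] (source, target)
        src, dst = edge_index
        scores = self.edge_scorer(torch.cat([x[src], x[dst]], dim=-1)).squeeze(-1)

        # Hard top-k edge mask with straight-through gradients to the scorer.
        k = max(1, int(self.keep_ratio * scores.numel()))
        hard = torch.zeros_like(scores)
        hard[scores.topk(k).indices] = 1.0
        soft = torch.sigmoid(scores)
        mask = hard + soft - soft.detach()         # forward: hard, backward: soft

        # Message passing restricted to the selected edges only.
        messages = self.msg(x[src]) * mask.unsqueeze(-1)
        h = torch.zeros(x.size(0), messages.size(-1), device=x.device)
        h = h.index_add(0, dst, messages).relu()

        # Graph-level prediction from the masked representation.
        logits = self.readout(h.mean(dim=0, keepdim=True))
        return logits, hard                        # hard mask = explanatory subgraph


# Toy usage on a random 5-node graph with 7-dimensional node features.
model = EdgeSelectingGNN(in_dim=7, hid_dim=32, num_classes=2)
x = torch.randn(5, 7)
edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
logits, explanation = model(x, edge_index)

Because the prediction is computed only from messages on the masked edges, the returned explanation is faithful by construction, which is the property the paper emphasizes; the constrained sampling used in L2XGNN additionally encourages the selected motifs to be sparse and connected.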
%0 Conference Paper
%1 serra2024l2xgnnlearningexplaingraph
%A Serra, Giuseppe
%A Niepert, Mathias
%B Proceedings of the European Conference on Machine Learning (ECML 2024)
%D 2024
%J Machine Learning Journal
%K ki mls
%T L2XGNN: Learning to Explain Graph Neural Networks
%U https://arxiv.org/abs/2209.14402
%X Graph Neural Networks (GNNs) are a popular class of machine learning models. Inspired by the learning to explain (L2X) paradigm, we propose L2XGNN, a framework for explainable GNNs that provides faithful explanations by design. L2XGNN learns a mechanism for selecting explanatory subgraphs (motifs), which are exclusively used in the GNN message-passing operations. L2XGNN can select, for each input graph, a subgraph with specific properties, such as being sparse and connected. Imposing such constraints on the motifs often leads to more interpretable and effective explanations. Experiments on several datasets suggest that L2XGNN achieves the same classification accuracy as baseline methods using the entire input graph while ensuring that only the provided explanations are used to make predictions. Moreover, we show that L2XGNN can identify motifs responsible for the graph's properties it is intended to predict.
@inproceedings{serra2024l2xgnnlearningexplaingraph,
abstract = {Graph Neural Networks (GNNs) are a popular class of machine learning models. Inspired by the learning to explain (L2X) paradigm, we propose L2XGNN, a framework for explainable GNNs that provides faithful explanations by design. L2XGNN learns a mechanism for selecting explanatory subgraphs (motifs), which are exclusively used in the GNN message-passing operations. L2XGNN can select, for each input graph, a subgraph with specific properties, such as being sparse and connected. Imposing such constraints on the motifs often leads to more interpretable and effective explanations. Experiments on several datasets suggest that L2XGNN achieves the same classification accuracy as baseline methods using the entire input graph while ensuring that only the provided explanations are used to make predictions. Moreover, we show that L2XGNN can identify motifs responsible for the graph's properties it is intended to predict.},
added-at = {2024-09-11T02:59:47.000+0200},
archiveprefix = {arXiv},
author = {Serra, Giuseppe and Niepert, Mathias},
biburl = {https://puma.ub.uni-stuttgart.de/bibtex/28500f1dddd5d30be77086b2a70b676fc/joy},
booktitle = {Proceedings of the European Conference on Machine Learning (ECML 2024)},
eprint = {2209.14402},
interhash = {718af26f22bafce43e12406ca5c8c54d},
intrahash = {8500f1dddd5d30be77086b2a70b676fc},
journal = {Machine Learning Journal},
keywords = {ki mls},
language = {en},
primaryclass = {cs.LG},
timestamp = {2024-09-11T03:15:13.000+0200},
title = {L2XGNN: Learning to Explain Graph Neural Networks},
url = {https://arxiv.org/abs/2209.14402},
year = 2024
}