The ability to perform fast and accurate atomistic simulations is crucial for advancing the chemical sciences. By learning from high-quality data, machine-learned interatomic potentials achieve accuracy on par with ab initio and first-principles methods at a fraction of their computational cost. The success of machine-learned interatomic potentials arises from integrating inductive biases such as equivariance to group actions on an atomic system, e.g., equivariance to rotations and reflections. In particular, the field has notably advanced with the emergence of equivariant message passing. Most of these models represent an atomic system with spherical tensors, whose tensor products require complicated numerical coefficients and can be computationally demanding. Cartesian tensors offer a promising alternative, though state-of-the-art methods lack flexibility in message-passing mechanisms, restricting their architectures and expressive power. This work explores higher-rank irreducible Cartesian tensors to address these limitations. We integrate irreducible Cartesian tensor products into message-passing neural networks and prove the equivariance and tracelessness of the resulting layers. Through empirical evaluations on various benchmark data sets, we consistently observe performance on par with or better than that of state-of-the-art spherical and Cartesian models.
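As a rough, self-contained illustration of the central object (a NumPy sketch of ours, not code from the paper): the rank-2 irreducible Cartesian tensor of a displacement vector is its symmetric, traceless outer product, and the two properties the paper proves for its layers, tracelessness and rotation equivariance, can be checked numerically in this simplest case. All function names below are assumptions for illustration.

```python
# Minimal sketch (NumPy only; not the authors' implementation). Builds the
# rank-2 irreducible Cartesian tensor of a 3-vector and checks that it is
# traceless and equivariant under rotations.
import numpy as np

def irreducible_rank2(r):
    """T_ij = r_i r_j - (1/3) * delta_ij * |r|^2 (symmetric, traceless)."""
    T = np.outer(r, r)
    return T - np.eye(3) * (np.trace(T) / 3.0)

def random_rotation(rng):
    """Random proper rotation from the QR decomposition of a Gaussian matrix."""
    Q, R = np.linalg.qr(rng.normal(size=(3, 3)))
    Q = Q * np.sign(np.diag(R))   # remove the column-sign ambiguity of QR
    if np.linalg.det(Q) < 0:      # enforce det = +1: a rotation, not a reflection
        Q[:, 0] *= -1
    return Q

rng = np.random.default_rng(0)
r = rng.normal(size=3)            # an interatomic displacement vector
Q = random_rotation(rng)

T = irreducible_rank2(r)
assert abs(np.trace(T)) < 1e-12                             # traceless
assert np.allclose(irreducible_rank2(Q @ r), Q @ T @ Q.T)   # equivariant
```

The paper's contribution concerns higher ranks and products of such tensors inside message-passing layers; this rank-2 check only illustrates the basic objects those layers operate on.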
%0 Conference Paper
%1 zaverkin2024higherrankirreduciblecartesiantensors
%A Zaverkin, Viktor
%A Alesiani, Francesco
%A Maruyama, Takashi
%A Errica, Federico
%A Christiansen, Henrik
%A Takamoto, Makoto
%A Weber, Nicolas
%A Niepert, Mathias
%B Proceedings of the 38th Annual Conference on Neural Information Processing Systems (NeurIPS 2024)
%D 2024
%K mls
%R 10.48550/arXiv.2405.14253
%T Higher-Rank Irreducible Cartesian Tensors for Equivariant Message Passing
%U https://arxiv.org/abs/2405.14253
%X The ability to perform fast and accurate atomistic simulations is crucial for advancing the chemical sciences. By learning from high-quality data, machine-learned interatomic potentials achieve accuracy on par with ab initio and first-principles methods at a fraction of their computational cost. The success of machine-learned interatomic potentials arises from integrating inductive biases such as equivariance to group actions on an atomic system, e.g., equivariance to rotations and reflections. In particular, the field has notably advanced with the emergence of equivariant message passing. Most of these models represent an atomic system with spherical tensors, whose tensor products require complicated numerical coefficients and can be computationally demanding. Cartesian tensors offer a promising alternative, though state-of-the-art methods lack flexibility in message-passing mechanisms, restricting their architectures and expressive power. This work explores higher-rank irreducible Cartesian tensors to address these limitations. We integrate irreducible Cartesian tensor products into message-passing neural networks and prove the equivariance and tracelessness of the resulting layers. Through empirical evaluations on various benchmark data sets, we consistently observe performance on par with or better than that of state-of-the-art spherical and Cartesian models.
@inproceedings{zaverkin2024higherrankirreduciblecartesiantensors,
abstract = {The ability to perform fast and accurate atomistic simulations is crucial for advancing the chemical sciences. By learning from high-quality data, machine-learned interatomic potentials achieve accuracy on par with ab initio and first-principles methods at a fraction of their computational cost. The success of machine-learned interatomic potentials arises from integrating inductive biases such as equivariance to group actions on an atomic system, e.g., equivariance to rotations and reflections. In particular, the field has notably advanced with the emergence of equivariant message passing. Most of these models represent an atomic system with spherical tensors, whose tensor products require complicated numerical coefficients and can be computationally demanding. Cartesian tensors offer a promising alternative, though state-of-the-art methods lack flexibility in message-passing mechanisms, restricting their architectures and expressive power. This work explores higher-rank irreducible Cartesian tensors to address these limitations. We integrate irreducible Cartesian tensor products into message-passing neural networks and prove the equivariance and tracelessness of the resulting layers. Through empirical evaluations on various benchmark data sets, we consistently observe performance on par with or better than that of state-of-the-art spherical and Cartesian models.},
archiveprefix = {arXiv},
author = {Zaverkin, Viktor and Alesiani, Francesco and Maruyama, Takashi and Errica, Federico and Christiansen, Henrik and Takamoto, Makoto and Weber, Nicolas and Niepert, Mathias},
biburl = {https://puma.ub.uni-stuttgart.de/bibtex/2f833b2523391d718cac6a21f9e4f50f4/mls},
booktitle = {Proceedings of the 38th Annual Conference on Neural Information Processing Systems (NeurIPS 2024)},
doi = {10.48550/arXiv.2405.14253},
eprint = {2405.14253},
eventtitle = {2024 Conference on Neural Information Processing Systems},
keywords = {mls},
language = {en},
primaryclass = {cs.LG},
title = {Higher-Rank Irreducible Cartesian Tensors for Equivariant Message Passing},
url = {https://arxiv.org/abs/2405.14253},
year = 2024
}