Selecting only the relevant subsets from all gathered data has never been as challenging as it is in these times of big data and sensor fusion. Multiple complementary methods have emerged for the observation of similar phenomena; oftentimes, many of these techniques are superimposed in order to make the best possible decisions. A pathologist, for example, uses microscopic and spectroscopic techniques to discriminate between healthy and cancerous tissue. Especially in the field of spectroscopy in medicine, an immense number of frequencies are recorded and appropriately sized datasets are rarely acquired due to the time-intensive measurements and the lack of patients. In order to cope with the curse of dimensionality in machine learning, it is necessary to reduce the overhead from irrelevant or redundant features. In this article, we propose a feature selection callback algorithm (FeaSel-Net) that can be embedded in deep neural networks. It recursively prunes the input nodes after the optimizer in the neural network achieves satisfying results. We demonstrate the performance of the feature selection algorithm on different publicly available datasets and compare it to existing feature selection methods. Our algorithm combines the advantages of neural networks’ nonlinear learning ability and the embedding of the feature selection algorithm into the actual classifier optimization.
%0 Journal Article
%1 make4040049
%A Fischer, Felix
%A Birk, Alexander
%A Somers, Peter
%A Frenner, Karsten
%A Tarín, Cristina
%A Herkommer, Alois
%D 2022
%J Machine Learning and Knowledge Extraction
%K alexander_birk alois_herkommer felix_fischer grk2543 ito journal karsten_frenner ods reviewed
%N 4
%P 968--993
%R 10.3390/make4040049
%T FeaSel-Net: A Recursive Feature Selection Callback in Neural Networks
%U https://www.mdpi.com/2504-4990/4/4/49
%V 4
%X Selecting only the relevant subsets from all gathered data has never been as challenging as it is in these times of big data and sensor fusion. Multiple complementary methods have emerged for the observation of similar phenomena; oftentimes, many of these techniques are superimposed in order to make the best possible decisions. A pathologist, for example, uses microscopic and spectroscopic techniques to discriminate between healthy and cancerous tissue. Especially in the field of spectroscopy in medicine, an immense number of frequencies are recorded and appropriately sized datasets are rarely acquired due to the time-intensive measurements and the lack of patients. In order to cope with the curse of dimensionality in machine learning, it is necessary to reduce the overhead from irrelevant or redundant features. In this article, we propose a feature selection callback algorithm (FeaSel-Net) that can be embedded in deep neural networks. It recursively prunes the input nodes after the optimizer in the neural network achieves satisfying results. We demonstrate the performance of the feature selection algorithm on different publicly available datasets and compare it to existing feature selection methods. Our algorithm combines the advantages of neural networks’ nonlinear learning ability and the embedding of the feature selection algorithm into the actual classifier optimization.
@article{make4040049,
  abstract  = {Selecting only the relevant subsets from all gathered data has never been as challenging as it is in these times of big data and sensor fusion. Multiple complementary methods have emerged for the observation of similar phenomena; oftentimes, many of these techniques are superimposed in order to make the best possible decisions. A pathologist, for example, uses microscopic and spectroscopic techniques to discriminate between healthy and cancerous tissue. Especially in the field of spectroscopy in medicine, an immense number of frequencies are recorded and appropriately sized datasets are rarely acquired due to the time-intensive measurements and the lack of patients. In order to cope with the curse of dimensionality in machine learning, it is necessary to reduce the overhead from irrelevant or redundant features. In this article, we propose a feature selection callback algorithm (FeaSel-Net) that can be embedded in deep neural networks. It recursively prunes the input nodes after the optimizer in the neural network achieves satisfying results. We demonstrate the performance of the feature selection algorithm on different publicly available datasets and compare it to existing feature selection methods. Our algorithm combines the advantages of neural networks’ nonlinear learning ability and the embedding of the feature selection algorithm into the actual classifier optimization.},
  added-at  = {2022-12-01T12:51:26.000+0100},
  author    = {Fischer, Felix and Birk, Alexander and Somers, Peter and Frenner, Karsten and Tar{\'\i}n, Cristina and Herkommer, Alois},
  biburl    = {https://puma.ub.uni-stuttgart.de/bibtex/25033474c7e2b01c7f702c009580db92f/ffischer},
  doi       = {10.3390/make4040049},
  interhash = {9eeeba3edd8680e845db0686e0abd93c},
  intrahash = {5033474c7e2b01c7f702c009580db92f},
  issn      = {2504-4990},
  journal   = {Machine Learning and Knowledge Extraction},
  keywords  = {alexander_birk alois_herkommer felix_fischer grk2543 ito journal karsten_frenner ods reviewed},
  number    = {4},
  pages     = {968--993},
  timestamp = {2023-08-23T14:33:45.000+0200},
  title     = {{FeaSel-Net}: A Recursive Feature Selection Callback in Neural Networks},
  url       = {https://www.mdpi.com/2504-4990/4/4/49},
  volume    = {4},
  year      = {2022},
}