Finding optimal message quantization is a key requirement for low complexity belief propagation (BP) decoding. To this end, we propose a floating-point surrogate model that imitates quantization effects as additions of uniform noise, whose amplitudes are trainable variables. We verify that the surrogate model closely matches the behavior of a fixed-point implementation and propose a hand-crafted loss function to realize a trade-off between complexity and error-rate performance. A deep learning-based method is then applied to optimize the message bitwidths. Moreover, we show that parameter sharing can both ensure implementation-friendly solutions and results in faster training convergence than independent parameters. We provide simulation results for 5G low-density parity-check (LDPC) codes and report an error-rate performance within 0.2 dB of floating-point decoding at an average message quantization bitwidth of 3.1 bits. In addition, we show that the learned bitwidths also generalize to other code rates and channels.
%0 Conference Paper
%1 10008635
%A Geiselhart, Marvin
%A Elkelesh, Ahmed
%A Clausius, Jannis
%A Liang, Fei
%A Xu, Wen
%A Liang, Jing
%A ten Brink, Stephan
%B 2022 IEEE Globecom Workshops (GC Wkshps)
%D 2022
%K myown from:mgeiselhart
%P 467-472
%R 10.1109/GCWkshps56602.2022.10008635
%T Learning Quantization in LDPC Decoders
%X Finding optimal message quantization is a key requirement for low complexity belief propagation (BP) decoding. To this end, we propose a floating-point surrogate model that imitates quantization effects as additions of uniform noise, whose amplitudes are trainable variables. We verify that the surrogate model closely matches the behavior of a fixed-point implementation and propose a hand-crafted loss function to realize a trade-off between complexity and error-rate performance. A deep learning-based method is then applied to optimize the message bitwidths. Moreover, we show that parameter sharing can both ensure implementation-friendly solutions and results in faster training convergence than independent parameters. We provide simulation results for 5G low-density parity-check (LDPC) codes and report an error-rate performance within 0.2 dB of floating-point decoding at an average message quantization bitwidth of 3.1 bits. In addition, we show that the learned bitwidths also generalize to other code rates and channels.
@inproceedings{10008635,
  abstract    = {Finding optimal message quantization is a key requirement for low complexity belief propagation (BP) decoding. To this end, we propose a floating-point surrogate model that imitates quantization effects as additions of uniform noise, whose amplitudes are trainable variables. We verify that the surrogate model closely matches the behavior of a fixed-point implementation and propose a hand-crafted loss function to realize a trade-off between complexity and error-rate performance. A deep learning-based method is then applied to optimize the message bitwidths. Moreover, we show that parameter sharing can both ensure implementation-friendly solutions and results in faster training convergence than independent parameters. We provide simulation results for 5G low-density parity-check (LDPC) codes and report an error-rate performance within 0.2 dB of floating-point decoding at an average message quantization bitwidth of 3.1 bits. In addition, we show that the learned bitwidths also generalize to other code rates and channels.},
  added-at    = {2023-01-16T14:32:48.000+0100},
  author      = {Geiselhart, Marvin and Elkelesh, Ahmed and Clausius, Jannis and Liang, Fei and Xu, Wen and Liang, Jing and ten Brink, Stephan},
  biburl      = {https://puma.ub.uni-stuttgart.de/bibtex/2fc6f4b06b0089299b15952d2bef1811a/inue},
  booktitle   = {2022 {IEEE} Globecom Workshops ({GC} Wkshps)},
  description = {Learning Quantization in LDPC Decoders | IEEE Conference Publication | IEEE Xplore},
  doi         = {10.1109/GCWkshps56602.2022.10008635},
  interhash   = {3b31f95071e2f56711dcd1836d7e7be7},
  intrahash   = {fc6f4b06b0089299b15952d2bef1811a},
  keywords    = {myown from:mgeiselhart},
  month       = dec,
  pages       = {467--472},
  timestamp   = {2023-01-16T13:32:48.000+0100},
  title       = {Learning Quantization in {LDPC} Decoders},
  year        = {2022}
}