
{  
   "types" : {
      "Bookmark" : {
         "pluralLabel" : "Bookmarks"
      },
      "Publication" : {
         "pluralLabel" : "Publications"
      },
      "GoldStandardPublication" : {
         "pluralLabel" : "GoldStandardPublications"
      },
      "GoldStandardBookmark" : {
         "pluralLabel" : "GoldStandardBookmarks"
      },
      "Tag" : {
         "pluralLabel" : "Tags"
      },
      "User" : {
         "pluralLabel" : "Users"
      },
      "Group" : {
         "pluralLabel" : "Groups"
      },
      "Sphere" : {
         "pluralLabel" : "Spheres"
      }
   },
   
   "properties" : {
      "count" : {
         "valueType" : "number"
      },
      "date" : {
         "valueType" : "date"
      },
      "changeDate" : {
         "valueType" : "date"
      },
      "url" : {
         "valueType" : "url"
      },
      "id" : {
         "valueType" : "url"
      },
      "tags" : {
         "valueType" : "item"
      },
      "user" : {
         "valueType" : "item"
      }      
   },
   
   "items" : [
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2949e1602cfb340948aef9bfea31b3bd2/inue",         
         "tags" : [
            "myown","from:sdnr","autoencoder","coding","ml"
         ],
         
         "intraHash" : "949e1602cfb340948aef9bfea31b3bd2",
         "interHash" : "1718fad335b8f87c716162107a5c03af",
         "label" : "Serial vs. Parallel Turbo-Autoencoders and Accelerated Training for Learned Channel Codes",
         "user" : "inue",
         "description" : "",
         "date" : "2022-04-25 20:32:04",
         "changeDate" : "2022-04-25 18:32:04",
         "count" : 4,
         "pub-type": "inproceedings",
         "booktitle": "2021 11th International Symposium on Topics in Coding (ISTC)",
         "year": "2021", 
         "url": "https://ieeexplore.ieee.org/document/9594130", 
         
         "author": [ 
            "Jannis Clausius","Sebastian Dörner","Sebastian Cammerer","Stephan ten Brink"
         ],
         "authors": [
         	
            	{"first" : "Jannis",	"last" : "Clausius"},
            	{"first" : "Sebastian",	"last" : "Dörner"},
            	{"first" : "Sebastian",	"last" : "Cammerer"},
            	{"first" : "Stephan",	"last" : "ten Brink"}
         ],
         "pages": "1-5","abstract": "Attracted by its scalability towards practical code-word lengths, we revisit the idea of Turbo-autoencoders for end-to-end learning of PHY-Layer communications. For this, we study the existing concepts of Turbo-autoencoders from the literature and compare the concept with state-of-the-art classical coding schemes. We propose a new component-wise training algorithm based on the idea of Gaussian a priori distributions that reduces the overall training time by almost a magnitude. Further, we propose a new serial architecture inspired by classical serially concatenated Turbo code structures and show that a carefully optimized interface between the two component autoencoders is required. To the best of our knowledge, these serial Turbo autoencoder structures are the best known neural network based learned sequences that can be trained from scratch without any required expert knowledge in the domain of channel codes.",
         "doi" : "10.1109/ISTC49272.2021.9594130",
         
         "bibtexKey": "9594130"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2949e1602cfb340948aef9bfea31b3bd2/sdnr",         
         "tags" : [
            "autoencoder","coding","ml","myown"
         ],
         
         "intraHash" : "949e1602cfb340948aef9bfea31b3bd2",
         "interHash" : "1718fad335b8f87c716162107a5c03af",
         "label" : "Serial vs. Parallel Turbo-Autoencoders and Accelerated Training for Learned Channel Codes",
         "user" : "sdnr",
         "description" : "",
         "date" : "2021-11-12 15:25:07",
         "changeDate" : "2022-04-25 18:32:04",
         "count" : 4,
         "pub-type": "inproceedings",
         "booktitle": "2021 11th International Symposium on Topics in Coding (ISTC)",
         "year": "2021", 
         "url": "https://ieeexplore.ieee.org/document/9594130", 
         
         "author": [ 
            "Jannis Clausius","Sebastian Dörner","Sebastian Cammerer","Stephan ten Brink"
         ],
         "authors": [
         	
            	{"first" : "Jannis",	"last" : "Clausius"},
            	{"first" : "Sebastian",	"last" : "Dörner"},
            	{"first" : "Sebastian",	"last" : "Cammerer"},
            	{"first" : "Stephan",	"last" : "ten Brink"}
         ],
         "pages": "1-5","abstract": "Attracted by its scalability towards practical code-word lengths, we revisit the idea of Turbo-autoencoders for end-to-end learning of PHY-Layer communications. For this, we study the existing concepts of Turbo-autoencoders from the literature and compare the concept with state-of-the-art classical coding schemes. We propose a new component-wise training algorithm based on the idea of Gaussian a priori distributions that reduces the overall training time by almost a magnitude. Further, we propose a new serial architecture inspired by classical serially concatenated Turbo code structures and show that a carefully optimized interface between the two component autoencoders is required. To the best of our knowledge, these serial Turbo autoencoder structures are the best known neural network based learned sequences that can be trained from scratch without any required expert knowledge in the domain of channel codes.",
         "doi" : "10.1109/ISTC49272.2021.9594130",
         
         "bibtexKey": "9594130"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/28ede931e14dcbd0204afb9c68ec874b5/inue",         
         "tags" : [
            "myown","from:sdnr","coding","ml"
         ],
         
         "intraHash" : "8ede931e14dcbd0204afb9c68ec874b5",
         "interHash" : "2c63a5029a56d0ddef6d1911b13e4ec9",
         "label" : "On Recurrent Neural Networks for Sequence-based Processing in Communications",
         "user" : "inue",
         "description" : "",
         "date" : "2020-04-28 11:48:06",
         "changeDate" : "2020-04-28 09:48:06",
         "count" : 4,
         "pub-type": "inproceedings",
         "booktitle": "2019 53rd Asilomar Conference on Signals, Systems, and Computers",
         "year": "2019", 
         "url": "https://ieeexplore.ieee.org/document/9048728", 
         
         "author": [ 
            "Daniel Tandler","Sebastian Dörner","Sebastian Cammerer","Stephan ten Brink"
         ],
         "authors": [
         	
            	{"first" : "Daniel",	"last" : "Tandler"},
            	{"first" : "Sebastian",	"last" : "Dörner"},
            	{"first" : "Sebastian",	"last" : "Cammerer"},
            	{"first" : "Stephan",	"last" : "ten Brink"}
         ],
         "pages": "537-543",
         "eprint" : "1905.09983",
         
         "archiveprefix" : "arXiv",
         
         "primaryclass" : "cs.IT",
         
         "bibtexKey": "RnnConvDecoding"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2acdd3c9a5b2b94a741489e25b9d410d2/scammerer",         
         "tags" : [
            "coding","ml","myown"
         ],
         
         "intraHash" : "acdd3c9a5b2b94a741489e25b9d410d2",
         "interHash" : "fa9ab2e8cc93b3bf45927278baeb8e0d",
         "label" : "Deep Learning-Based Polar Code Design",
         "user" : "scammerer",
         "description" : "Deep Learning-Based Polar Code Design - IEEE Conference Publication",
         "date" : "2020-03-23 22:16:01",
         "changeDate" : "2020-03-23 21:34:27",
         "count" : 3,
         "pub-type": "inproceedings",
         "booktitle": "2019 57th Annual Allerton Conference on Communication, Control, and Computing (Allerton)",
         "year": "2019", 
         "url": "https://ieeexplore.ieee.org/document/8919804", 
         
         "author": [ 
            "M. Ebada","S. Cammerer","A. Elkelesh","S. ten Brink"
         ],
         "authors": [
         	
            	{"first" : "M.",	"last" : "Ebada"},
            	{"first" : "S.",	"last" : "Cammerer"},
            	{"first" : "A.",	"last" : "Elkelesh"},
            	{"first" : "S.",	"last" : "ten Brink"}
         ],
         "pages": "177-183","abstract": "In this work, we introduce a deep learning-based polar code construction algorithm. The core idea is to represent the information/frozen bit indices of a polar code as a binary vector which can be interpreted as trainable weights of a neural network (NN). For this, we demonstrate how this binary vector can be relaxed to a soft-valued vector, facilitating the learning process through gradient descent and enabling an efficient code construction. We further show how different polar code design constraints (e.g., code rate) can be taken into account by means of careful binary-to-soft and soft-to-binary conversions, along with rate-adjustment after each learning iteration. Besides its conceptual simplicity, this approach benefits from having the \u201Cdecoder-in-the-toop\u201D, i.e., the nature of the decoder is inherently taken into consideration while learning (designing) the polar code. We show results for belief propagation (BP) decoding over both AWGN and Rayleigh fading channels with considerable performance gains over state-of-the-art construction schemes.",
         "issn" : "null",
         
         "doi" : "10.1109/ALLERTON.2019.8919804",
         
         "bibtexKey": "8919804"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/26a5f7e30a790e5c4b0c5919ddf9b92f8/scammerer",         
         "tags" : [
            "coding","ml","myown"
         ],
         
         "intraHash" : "6a5f7e30a790e5c4b0c5919ddf9b92f8",
         "interHash" : "ce330c89e256ef4a39c1704d366ed2ea",
         "label" : "Scaling Deep Learning-Based Decoding of Polar Codes via Partitioning",
         "user" : "scammerer",
         "description" : "Scaling Deep Learning-Based Decoding of Polar Codes via Partitioning - IEEE Conference Publication",
         "date" : "2020-03-23 22:15:28",
         "changeDate" : "2020-03-23 21:34:27",
         "count" : 3,
         "pub-type": "inproceedings",
         "booktitle": "GLOBECOM 2017 - 2017 IEEE Global Communications Conference",
         "year": "2017", 
         "url": "https://ieeexplore.ieee.org/document/8254811", 
         
         "author": [ 
            "S. Cammerer","T. Gruber","J. Hoydis","S. ten Brink"
         ],
         "authors": [
         	
            	{"first" : "S.",	"last" : "Cammerer"},
            	{"first" : "T.",	"last" : "Gruber"},
            	{"first" : "J.",	"last" : "Hoydis"},
            	{"first" : "S.",	"last" : "ten Brink"}
         ],
         "pages": "1-6","abstract": "The training complexity of deep learning-based channel decoders scales exponentially with the codebook size and therefore with the number of information bits. Thus, neural network decoding (NND) is currently only feasible for very short block lengths. In this work, we show that the conventional iterative decoding algorithm for polar codes can be enhanced when sub-blocks of the decoder are replaced by neural network (NN) based components. Thus, we partition the encoding graph into smaller sub-blocks and train them individually, closely approaching maximum a posteriori (MAP) performance per sub-block. These blocks are then connected via the remaining conventional belief propagation decoding stage(s). The resulting decoding algorithm is non-iterative and inherently enables a highlevel of parallelization, while showing a competitive bit error rate (BER) performance. We examine the degradation through partitioning and compare the resulting decoder to state-of-the art polar decoders such as successive cancellation list and belief propagation decoding.",
         "issn" : "null",
         
         "doi" : "10.1109/GLOCOM.2017.8254811",
         
         "bibtexKey": "8254811"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2993cd23c81524e8c474582574f8f9895/scammerer",         
         "tags" : [
            "coding","ml","myown"
         ],
         
         "intraHash" : "993cd23c81524e8c474582574f8f9895",
         "interHash" : "5f62981eb5c5c7a40a15add6257225da",
         "label" : "On deep learning-based channel decoding",
         "user" : "scammerer",
         "description" : "On deep learning-based channel decoding - IEEE Conference Publication",
         "date" : "2020-03-23 20:38:47",
         "changeDate" : "2020-03-23 21:34:27",
         "count" : 3,
         "pub-type": "inproceedings",
         "booktitle": "2017 51st Annual Conference on Information Sciences and Systems (CISS)",
         "year": "2017", 
         "url": "https://ieeexplore.ieee.org/document/7926071", 
         
         "author": [ 
            "T. Gruber","S. Cammerer","J. Hoydis","S. t. Brink"
         ],
         "authors": [
         	
            	{"first" : "T.",	"last" : "Gruber"},
            	{"first" : "S.",	"last" : "Cammerer"},
            	{"first" : "J.",	"last" : "Hoydis"},
            	{"first" : "S.",	"last" : "t. Brink"}
         ],
         "pages": "1-6","abstract": "We revisit the idea of using deep neural networks for one-shot decoding of random and structured codes, such as polar codes. Although it is possible to achieve maximum a posteriori (MAP) bit error rate (BER) performance for both code families and for short codeword lengths, we observe that (i) structured codes are easier to learn and (ii) the neural network is able to generalize to codewords that it has never seen during training for structured, but not for random codes. These results provide some evidence that neural networks can learn a form of decoding algorithm, rather than only a simple classifier. We introduce the metric normalized validation error (NVE) in order to further investigate the potential and limitations of deep learning-based decoding with respect to performance and complexity.",
         "issn" : "null",
         
         "doi" : "10.1109/CISS.2017.7926071",
         
         "bibtexKey": "7926071"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/28ede931e14dcbd0204afb9c68ec874b5/sdnr",         
         "tags" : [
            "coding","ml","myown"
         ],
         
         "intraHash" : "8ede931e14dcbd0204afb9c68ec874b5",
         "interHash" : "2c63a5029a56d0ddef6d1911b13e4ec9",
         "label" : "On Recurrent Neural Networks for Sequence-based Processing in Communications",
         "user" : "sdnr",
         "description" : "",
         "date" : "2020-03-20 15:19:02",
         "changeDate" : "2022-03-22 11:20:41",
         "count" : 4,
         "pub-type": "inproceedings",
         "booktitle": "2019 53rd Asilomar Conference on Signals, Systems, and Computers",
         "year": "2019", 
         "url": "https://ieeexplore.ieee.org/document/9048728", 
         
         "author": [ 
            "Daniel Tandler","Sebastian Dörner","Sebastian Cammerer","Stephan ten Brink"
         ],
         "authors": [
         	
            	{"first" : "Daniel",	"last" : "Tandler"},
            	{"first" : "Sebastian",	"last" : "Dörner"},
            	{"first" : "Sebastian",	"last" : "Cammerer"},
            	{"first" : "Stephan",	"last" : "ten Brink"}
         ],
         "pages": "537-543","abstract": "In this work, we analyze the capabilities and practical limitations of neural networks (NNs) for sequence-based signal processing which can be seen as an omnipresent property in almost any modern communication systems. In particular, we train multiple state-of-the-art recurrent neural network (RNN) structures to learn how to decode convolutional codes allowing a clear benchmarking with the corresponding maximum likelihood (ML) Viterbi decoder. We examine the decoding performance for various kinds of NN architectures, beginning with classical types like feedforward layers and gated recurrent unit (GRU)-layers, up to more recently introduced architectures such as temporal convolutional networks (TCNs) and differentiable neural computers (DNCs) with external memory. As a key limitation, it turns out that the training complexity increases exponentially with the length of the encoding memory ν and, thus, practically limits the achievable bit error rate (BER) performance. To overcome this limitation, we introduce a new training-method by gradually increasing the number of ones within the training sequences, i.e., we constrain the amount of possible training sequences in the beginning until first convergence. By consecutively adding more and more possible sequences to the training set, we finally achieve training success in cases that did not converge before via naive training. Further, we show that our network can learn to jointly detect and decode a quadrature phase shift keying (QPSK) modulated code with sub-optimal (anti-Gray) labeling in one-shot at a performance that would require iterations between demapper and decoder in classic detection schemes.",
         "eprint" : "1905.09983",
         
         "archiveprefix" : "arXiv",
         
         "primaryclass" : "cs.IT",
         
         "doi" : "10.1109/IEEECONF44664.2019.9048728",
         
         "bibtexKey": "RnnConvDecoding"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2077cf92897e7ba75fe6292a41f83ffb1/inue",         
         "tags" : [
            "myown","from:sdnr","coding","ml"
         ],
         
         "intraHash" : "077cf92897e7ba75fe6292a41f83ffb1",
         "interHash" : "2c63a5029a56d0ddef6d1911b13e4ec9",
         "label" : "On Recurrent Neural Networks for Sequence-based Processing in Communications",
         "user" : "inue",
         "description" : "",
         "date" : "2020-03-20 15:19:02",
         "changeDate" : "2020-03-20 14:19:02",
         "count" : 4,
         "pub-type": "misc",
         
         "year": "2019", 
         "url": "https://arxiv.org/abs/1905.09983", 
         
         "author": [ 
            "Daniel Tandler","Sebastian Dörner","Sebastian Cammerer","Stephan ten Brink"
         ],
         "authors": [
         	
            	{"first" : "Daniel",	"last" : "Tandler"},
            	{"first" : "Sebastian",	"last" : "Dörner"},
            	{"first" : "Sebastian",	"last" : "Cammerer"},
            	{"first" : "Stephan",	"last" : "ten Brink"}
         ],
         
         "eprint" : "1905.09983",
         
         "archiveprefix" : "arXiv",
         
         "primaryclass" : "cs.IT",
         
         "bibtexKey": "RnnConvDecoding"

      }
   ]
}
