<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns:community="http://www.bibsonomy.org/ontologies/2008/05/community#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:admin="http://webns.net/mvcb/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:cc="http://web.resource.org/cc/" xmlns:xsd="http://www.w3.org/2001/XMLSchema#" xmlns:swrc="http://swrc.ontoware.org/ontology#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns="http://purl.org/rss/1.0/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xml:base="https://puma.ub.uni-stuttgart.de/tag/Parallelisation%20GPU"><owl:Ontology rdf:about=""><rdfs:comment>PUMA publications for /tag/Parallelisation%20GPU</rdfs:comment><owl:imports rdf:resource="http://swrc.ontoware.org/ontology/portal"/></owl:Ontology><rdf:Description rdf:about="https://puma.ub.uni-stuttgart.de/bibtex/2a4a5c9728db0683dda1b2b3318fa0c9d/amerwafai"><owl:sameAs rdf:resource="/uri/bibtex/2a4a5c9728db0683dda1b2b3318fa0c9d/amerwafai"/><rdf:type rdf:resource="http://swrc.ontoware.org/ontology#InProceedings"/><swrc:date>Fri Jan 29 09:34:55 CET 2016</swrc:date><swrc:booktitle>New Horizons in Web Based Learning</swrc:booktitle><swrc:month>December</swrc:month><swrc:pages>21-29</swrc:pages><swrc:publisher><swrc:Organization swrc:name="Springer Berlin Heidelberg"/></swrc:publisher><swrc:series>LNCS</swrc:series><swrc:title>Optimization of industrial Neural Network simulators for GPGPUs</swrc:title><swrc:volume>7697</swrc:volume><swrc:year>2011</swrc:year><swrc:keywords>Back CUDA GPGPU GPU HLRS Network Neural Parallelisation Propagation SCOPE myown </swrc:keywords><swrc:abstract>This paper introduces the porting of an industrial neural network simulator onto GPUs used in a tool-chain to sort massive amounts of E-mails and other textual data. 
Compared to other previous work, all steps are being executed on the GPU, achieving overall up to 33× speedup without using any cuBLAS functionality. All the time-consuming routines have been ported onto the GPU, i.e. the training-, the simulation- and the verification-phases, the training being the most time-consuming. It is planned to include these GPU-kernels into the product for special customer&#039;s demands.</swrc:abstract><swrc:hasExtraField><swrc:Field swrc:value="2015-08-18 14:03:50 +0000" swrc:key="date-added"/></swrc:hasExtraField><swrc:hasExtraField><swrc:Field swrc:value="2015-08-18 14:20:22 +0000" swrc:key="date-modified"/></swrc:hasExtraField><swrc:author><rdf:Seq><rdf:_1><swrc:Person swrc:name="Mhd. Amer Wafai"/></rdf:_1><rdf:_2><swrc:Person swrc:name="Zaheer Ahmed"/></rdf:_2><rdf:_3><swrc:Person swrc:name="Rainer Keller"/></rdf:_3><rdf:_4><swrc:Person swrc:name="Sven Holzmann"/></rdf:_4><rdf:_5><swrc:Person swrc:name="Björn Sander"/></rdf:_5><rdf:_6><swrc:Person swrc:name="Michael Resch"/></rdf:_6></rdf:Seq></swrc:author><swrc:editor><rdf:Seq><rdf:_1><swrc:Person swrc:name="Dickson K. W. Chiu"/></rdf:_1><rdf:_2><swrc:Person swrc:name="Minhong Wang"/></rdf:_2><rdf:_3><swrc:Person swrc:name="Elvira Popescu"/></rdf:_3><rdf:_4><swrc:Person swrc:name="Qing Li"/></rdf:_4><rdf:_5><swrc:Person swrc:name="Rynson Lau"/></rdf:_5></rdf:Seq></swrc:editor></rdf:Description><foaf:Group rdf:about="https://puma.ub.uni-stuttgart.de/tag/Parallelisation%20GPU"><foaf:name>Parallelisation GPU</foaf:name><description>Community for tag(s) Parallelisation GPU</description></foaf:Group></rdf:RDF>