
{  
   "types" : {
      "Bookmark" : {
         "pluralLabel" : "Bookmarks"
      },
      "Publication" : {
         "pluralLabel" : "Publications"
      },
      "GoldStandardPublication" : {
         "pluralLabel" : "GoldStandardPublications"
      },
      "GoldStandardBookmark" : {
         "pluralLabel" : "GoldStandardBookmarks"
      },
      "Tag" : {
         "pluralLabel" : "Tags"
      },
      "User" : {
         "pluralLabel" : "Users"
      },
      "Group" : {
         "pluralLabel" : "Groups"
      },
      "Sphere" : {
         "pluralLabel" : "Spheres"
      }
   },
   
   "properties" : {
      "count" : {
         "valueType" : "number"
      },
      "date" : {
         "valueType" : "date"
      },
      "changeDate" : {
         "valueType" : "date"
      },
      "url" : {
         "valueType" : "url"
      },
      "id" : {
         "valueType" : "url"
      },
      "tags" : {
         "valueType" : "item"
      },
      "user" : {
         "valueType" : "item"
      }      
   },
   
   "items" : [
   	  
	  {  
         "type" : "Bookmark",
         "id"   : "https://puma.ub.uni-stuttgart.de/url/ce32d3ac672323296914e5376504226b/droessler",
         "tags" : [
            "code","editor","markdown","microsoft","open","schreiben","software","source","studio","visual"
         ],
         
         "intraHash" : "ce32d3ac672323296914e5376504226b",
         "label" : "eBook: Visual Studio Code - Tipps & Tricks Vol. 1 (Deutsch/Englisch)",
         "user" : "droessler",
         "description" : "Ein kostenloser Open Source Code Editor zum Entwickeln und Debuggen moderner Web- und Cloud-Anwendungen. Kostenlos verfügbar für Linux, Mac OS X und Windows.",
         "date" : "2022-08-19 15:41:18",
         "changeDate" : "2022-08-19 13:41:18",
         "count" : 1,
         "url" : "https://www.microsoft.com/de-de/techwiese/aktionen/visual-studio-code-ebook-download.aspx"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2929267cb5e0b5264e87dad614891c917/hcics",         
         "tags" : [
            "(EOG),","Analysis,","Awareness,","Cognition-","Cognitive","Context,","Electrooculography","Eye","Memory","Movement","Recall","Visual","hcics","vis"
         ],
         
         "intraHash" : "929267cb5e0b5264e87dad614891c917",
         "interHash" : "479a426bac9c064356a5ed821e20af91",
         "label" : "Recognition of Visual Memory Recall Processes Using Eye Movement Analysis",
         "user" : "hcics",
         "description" : "",
         "date" : "2024-07-11 10:05:52",
         "changeDate" : "2024-07-11 10:11:36",
         "count" : 2,
         "pub-type": "inproceedings",
         "booktitle": "Proc. ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp)",
         "year": "2011", 
         "url": "", 
         
         "author": [ 
            "Andreas Bulling","Daniel Roggen"
         ],
         "authors": [
         	
            	{"first" : "Andreas",	"last" : "Bulling"},
            	{"first" : "Daniel",	"last" : "Roggen"}
         ],
         "pages": "455-464","abstract": "Physical activity, location, as well as a person's psychophysiological and affective state are common dimensions for developing context-aware systems in ubiquitous computing. An important yet missing contextual dimension is the cognitive context that comprises all aspects related to mental information processing, such as perception, memory, knowledge, or learning. In this work we investigate the feasibility of recognising visual memory recall. We use a recognition methodology that combines minimum redundancy maximum relevance feature selection (mRMR) with a support vector machine (SVM) classifier. We validate the methodology in a dual user study with a total of fourteen participants looking at familiar and unfamiliar pictures from four picture categories: abstract, landscapes, faces, and buildings. Using person-independent training, we are able to discriminate between familiar and unfamiliar abstract pictures with a top recognition rate of 84.3% (89.3% recall, 21.0% false positive rate) over all participants. We show that eye movement analysis is a promising approach to infer the cognitive context of a person and discuss the key challenges for the real-world implementation of eye-based cognition-aware systems.",
         "doi" : "10.1145/2030112.2030172",
         
         "bibtexKey": "bulling11_ubicomp"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/207ccf57112e1bc861e5a36005270aae0/brothaupt",         
         "tags" : [
            "assistance","augmentation","helicopter","ifr","myown","simulator","study","ultralight","visual"
         ],
         
         "intraHash" : "07ccf57112e1bc861e5a36005270aae0",
         "interHash" : "42c0f595c380e026bb55776dac84745a",
         "label" : "Simulator-Based Evaluation of Visual Pilot Assistance for Coaxial Ultralight Helicopters",
         "user" : "brothaupt",
         "description" : "",
         "date" : "2024-02-02 15:32:13",
         "changeDate" : "2026-01-19 12:06:27",
         "count" : 2,
         "pub-type": "inproceedings",
         "booktitle": "Proceedings of the Vertical Flight Society 79th Annual Forum","publisher":"The Vertical Flight Society",
         "year": "2023", 
         "url": "http://dx.doi.org/10.4050/F-0079-2023-18029", 
         
         "author": [ 
            "Benjamin Rothaupt","Manuel Spülbeck","Walter Fichter"
         ],
         "authors": [
         	
            	{"first" : "Benjamin",	"last" : "Rothaupt"},
            	{"first" : "Manuel",	"last" : "Spülbeck"},
            	{"first" : "Walter",	"last" : "Fichter"}
         ],
         "abstract": "This paper presents a flight simulator study that examines whether a display inside the cockpit can aid helicopter pilots with little to no experience in completing basic maneuvers. The study participants have no prior experience as helicopter pilots. The flight simulation uses a dynamic model of a coaxial ultralight helicopter horizontal motion that includes a stability augmentation system. A virtual reality headset is used to give the participants a realistic sense of perspective. The benchmark task includes decelerating into hover and hovering above a target for a given time. Three cueing configurations are compared. One includes visual cues on the ground that mark the hover target position. The two others add either a heads down display or a heads up display inside the cockpit, which visualize the relative target position and a prediction of the helicopter motion. With the proposed displays available inside the cockpit, participants tend to reach the target faster and more consistently. Hover performance is not improved by an additional display as the pilots mostly rely on visual cues on the ground during hover. In summary, both log data and pilot feedback suggest that the proposed displays are primarily beneficial in flight phases where the helicopter moves.",
         "doi" : "10.4050/f-0079-2023-18029",
         
         "bibtexKey": "Rothaupt_2023"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/20dfb045213232a2162921e36d01a9aa6/jmueller",         
         "tags" : [
            "adaptive","based","ceramics,","control","controllable","crystal","devices,","direct","electrochromic","facade","films,","glass","glazed","glazing","glazing,","heat","intelligent","liquid","materials,","mechanism,","optical","optimisation,","optimization,","performance","properties","protection,","self-adjusting","smart","sobek","solar","sunlight","sunlight,","survey,","system","system,","systems,","thermochromic","transfer,","units,","visual","windows,"
         ],
         
         "intraHash" : "0dfb045213232a2162921e36d01a9aa6",
         "interHash" : "01f845c3c121cf815ea4a02f5fee1662",
         "label" : "Potential of origami-based shell elements as next-generation envelope components",
         "user" : "jmueller",
         "description" : "",
         "date" : "2023-11-27 15:10:57",
         "changeDate" : "2023-11-27 15:10:57",
         "count" : 10,
         "pub-type": "inproceedings",
         "booktitle": "2017 IEEE International Conference on Advanced Intelligent Mechatronics (AIM), July 3-7, 2017, Munich",
         "year": "2017", 
         "url": "", 
         
         "author": [ 
            "Yves Klett","Peter Middendorf","Werner Sobek","Walter Haase","Michael Heidingsfeld"
         ],
         "authors": [
         	
            	{"first" : "Yves",	"last" : "Klett"},
            	{"first" : "Peter",	"last" : "Middendorf"},
            	{"first" : "Werner",	"last" : "Sobek"},
            	{"first" : "Walter",	"last" : "Haase"},
            	{"first" : "Michael",	"last" : "Heidingsfeld"}
         ],
         "pages": "916--920","abstract": "Building envelopes manage several crucial functions,\nincluding structural, thermal, hygric and aesthetic functions.\nClassic façade concepts usually work with static elements like glass, metal or composite panels that primarily provide protection against the elements, and an additional layer of active systems that manage dynamic tasks like light protection or thermal regulation. Kinematic shell elements offer new ways to incorporate multiple dynamic functionalities into cladding\nelements, and thus can help to generate new active, efficient and\naesthetic envelopes. We will introduce the concept of origami-inspired\nmultifunctional shell elements and discuss potential\napplications.",
         "doi" : "10.1109/AIM.2017.8014135",
         
         "bibtexKey": "klett_potential_2017"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2b0808e52c918c386f81806471438cd84/jmueller",         
         "tags" : [
            "adaptive","based","ceramics,","control","controllable","crystal","devices,","direct","electrochromic","facade","films,","glass","glazed","glazing","glazing,","heat","intelligent","liquid","materials,","mechanism,","optical","optimisation,","optimization,","performance","properties","protection,","self-adjusting","smart","sobek","solar","sunlight","sunlight,","survey,","system","system,","systems,","thermochromic","transfer,","units,","visual","windows,"
         ],
         
         "intraHash" : "b0808e52c918c386f81806471438cd84",
         "interHash" : "8dfb7a35148f9e27aa8e8d039406f7f2",
         "label" : "Adaptive glazing systems - survey of systems",
         "user" : "jmueller",
         "description" : "",
         "date" : "2023-11-27 15:10:57",
         "changeDate" : "2023-11-27 15:10:57",
         "count" : 1,
         "pub-type": "inproceedings",
         "booktitle": "2017 IEEE International Conference on Advanced Intelligent Mechatronics (AIM), July 3-7, 2017, Munich",
         "year": "2017", 
         "url": "", 
         
         "author": [ 
            "Walter Haase","Marzena Husser","Werner Sobek"
         ],
         "authors": [
         	
            	{"first" : "Walter",	"last" : "Haase"},
            	{"first" : "Marzena",	"last" : "Husser"},
            	{"first" : "Werner",	"last" : "Sobek"}
         ],
         "pages": "929--933","abstract": "Glazed facade units must satisfy numerous criteria. In addition to allowing an unobstructed view of the exterior they should also provide protection from direct sunlight and the associated heat transfer. In order to optimize the performance of glazed facades under varying conditions, much effort has been directed towards the development of adaptive glazing systems based on smart materials or smart mechanism. This article will outline the functional principles and visual properties of one self-adjusting, thermochromic glazing, two controllable electrochromic systems and one liquid crystal based system.",
         "doi" : "10.1109/AIM.2017.8014137",
         
         "bibtexKey": "haase_adaptive_2017"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/22bd4bdf4e64b52fe8c55f005f7df9e16/mgeiger",         
         "tags" : [
            "adaptive","based","ceramics,","control","controllable","crystal","devices,","direct","electrochromic","facade","films,","glass","glazed","glazing","glazing,","heat","intelligent","liquid","materials,","mechanism,","optical","optimisation,","optimization,","performance","properties","protection,","self-adjusting","smart","solar","sunlight","sunlight,","survey,","system","system,","systems,","thermochromic","transfer,","units,","visual","windows,"
         ],
         
         "intraHash" : "2bd4bdf4e64b52fe8c55f005f7df9e16",
         "interHash" : "6824b2fe2fc917817789f612dae3555c",
         "label" : "Automated numerical process chain for the design of folded sandwich cores",
         "user" : "mgeiger",
         "description" : "",
         "date" : "2019-11-08 16:11:00",
         "changeDate" : "2019-11-08 15:14:53",
         "count" : 8,
         "pub-type": "inproceedings",
         "booktitle": "7th International Meeting on Origami in Science, Mathematics and Education (OSME), September 5-7, 2018, Oxford, UK",
         "year": "2018", 
         "url": "", 
         
         "author": [ 
            "Fabian Muhs","Yves Klett","Peter Middendorf"
         ],
         "authors": [
         	
            	{"first" : "Fabian",	"last" : "Muhs"},
            	{"first" : "Yves",	"last" : "Klett"},
            	{"first" : "Peter",	"last" : "Middendorf"}
         ],
         "volume": "4","pages": "1043--1058",
         "isbn" : "978-1-911093-92-3",
         
         "bibtexKey": "muhs_automated_2018"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2e34621ddb518cfb894557f38de91b22e/markusjohn",         
         "tags" : [
            "Analytics,","Humanities,","Visual","Visualization,"
         ],
         
         "intraHash" : "e34621ddb518cfb894557f38de91b22e",
         "interHash" : "19f1bd8a9014b378330f213b2272536e",
         "label" : "MultiCloud: Interactive Word Cloud Visualization for Multiple Texts",
         "user" : "markusjohn",
         "description" : "",
         "date" : "2018-06-27 14:18:53",
         "changeDate" : "2018-06-28 13:52:38",
         "count" : 1,
         "pub-type": "article",
         "journal": "Proceedings of Graphics Interface (2018)",
         "year": "2018", 
         "url": "", 
         
         "author": [ 
            "Markus John","Eduard Marbach","Steffen Lohmann","Florian Heimerl","Thomas Ertl"
         ],
         "authors": [
         	
            	{"first" : "Markus",	"last" : "John"},
            	{"first" : "Eduard",	"last" : "Marbach"},
            	{"first" : "Steffen",	"last" : "Lohmann"},
            	{"first" : "Florian",	"last" : "Heimerl"},
            	{"first" : "Thomas",	"last" : "Ertl"}
         ],
         "volume": "44",
         "bibtexKey": "johnmulticloud"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2c15cdea0c6cf2b5905b9856d8f19a0c7/markusjohn",         
         "tags" : [
            "Visual","analysis","digital"
         ],
         
         "intraHash" : "c15cdea0c6cf2b5905b9856d8f19a0c7",
         "interHash" : "0de674c3c3848876fdd9f697aa67978c",
         "label" : "A Visual Analytics Approach for Semantic Multi-Video Annotation",
         "user" : "markusjohn",
         "description" : "",
         "date" : "2018-02-12 14:50:38",
         "changeDate" : "2018-02-19 11:48:10",
         "count" : 1,
         "pub-type": "article",
         
         "year": "2017", 
         "url": "", 
         
         "author": [ 
            "Markus John","Kuno Kurzhals","Steffen Koch","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Markus",	"last" : "John"},
            	{"first" : "Kuno",	"last" : "Kurzhals"},
            	{"first" : "Steffen",	"last" : "Koch"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         
         "bibtexKey": "johnvisual"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2ecfdb2163a6a36dc68a459561138ce64/henrykhangvu",         
         "tags" : [
            "Data","Exploration","Visual"
         ],
         
         "intraHash" : "ecfdb2163a6a36dc68a459561138ce64",
         "interHash" : "868eb8c0c45b2947192f1f0b6f6ac1b5",
         "label" : "Provenance-based Recommendations for Visual Data Exploration",
         "user" : "henrykhangvu",
         "description" : "",
         "date" : "2017-06-04 21:31:58",
         "changeDate" : "2017-06-04 19:31:58",
         "count" : 15,
         "pub-type": "inproceedings",
         
         "year": "2017", 
         "url": "", 
         
         "author": [ 
            "Melanie Herschel","Houssem Ben Lahmar"
         ],
         "authors": [
         	
            	{"first" : "Melanie",	"last" : "Herschel"},
            	{"first" : "Houssem",	"last" : "Ben Lahmar"}
         ],
         
         "bibtexKey": "herschel2017provenancebased"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/231c5727eb88426afb28ed3dd780d71bc/markusjohn",         
         "tags" : [
            "Movie","analysis,","analytics","myown","video","visual","visualization,"
         ],
         
         "intraHash" : "31c5727eb88426afb28ed3dd780d71bc",
         "interHash" : "b5d426101d290c9be0f34bb94afa732c",
         "label" : "Visual Movie Analytics",
         "user" : "markusjohn",
         "description" : "",
         "date" : "2017-03-06 17:17:40",
         "changeDate" : "2017-03-10 09:38:39",
         "count" : 1,
         "pub-type": "inproceedings",
         "booktitle": "IEEE TRANSACTIONS ON MULTIMEDIA, VOL. 18",
         "year": "2017", 
         "url": "", 
         
         "author": [ 
            "Kuno Kurzhals","Markus John","Florian Heimerl","Paul Kuznecov","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Kuno",	"last" : "Kurzhals"},
            	{"first" : "Markus",	"last" : "John"},
            	{"first" : "Florian",	"last" : "Heimerl"},
            	{"first" : "Paul",	"last" : "Kuznecov"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "pages": "51","abstract": "The analysis of inherent structures of movies plays\r\nan important role in studying stylistic devices and specific,\r\ncontent-related questions. Examples are the analysis of personal\r\nconstellations in movie scenes, dialogue-based content analysis,\r\nor the investigation of image-based features. We provide a visual\r\nanalytics approach that supports the analytical reasoning process\r\nto derive higher level insights about the content on a semantic\r\nlevel. Combining automatic methods for semantic scene analysis\r\nbased on script and subtitle text, we perform a low-level analysis\r\nof the data automatically. Our approach features an interactive\r\nvisualization that allows a multilayer interpretation of descriptive\r\nfeatures to characterize movie content. For semantic analysis, we\r\nextract scene information from movie scripts and match them with\r\nthe corresponding subtitles. With text- and image-based query\r\ntechniques, we facilitate an interactive comparison of different\r\nmovie scenes on an image and on a semantic level. We demonstrate\r\nhow our approach can be applied for content analysis on a popular\r\nHollywood movie.",
         "bibtexKey": "conf/siggraph/Huh96"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2b1f872da093bff4f949ba42c67bfb1b0/markusjohn",         
         "tags" : [
            "Analytics,","Close","Digital","Distant","Humanities,","Reading","Text","Visual","Visualization,","myown"
         ],
         
         "intraHash" : "b1f872da093bff4f949ba42c67bfb1b0",
         "interHash" : "2d94ee951b425ccdc4c9dad2787b8042",
         "label" : "Visual Analytics for Narrative Text\r\nVisualizing Characters and their Relationships as Extracted from Novels",
         "user" : "markusjohn",
         "description" : "",
         "date" : "2017-03-06 17:10:42",
         "changeDate" : "2017-03-10 09:39:02",
         "count" : 2,
         "pub-type": "article",
         "journal": "In Proceedings of the 6th International Conference on Information Visualization Theory and Applications",
         "year": "2016", 
         "url": "", 
         
         "author": [ 
            "Markus John","Steffen Lohmann","Steffen Koch","Michael Wörner","Thomas Ertl"
         ],
         "authors": [
         	
            	{"first" : "Markus",	"last" : "John"},
            	{"first" : "Steffen",	"last" : "Lohmann"},
            	{"first" : "Steffen",	"last" : "Koch"},
            	{"first" : "Michael",	"last" : "Wörner"},
            	{"first" : "Thomas",	"last" : "Ertl"}
         ],
         "abstract": "The study of novels and the analysis of their plot, characters and other entities are time-consuming and complex\r\ntasks in literary science. The digitization of literature and the proliferation of electronic books provide\r\nnew opportunities to support these tasks with visual abstractions. Methods from the fields of computational linguistics\r\ncan be used to automatically extract entities and their relations from digitized novels, which can then\r\nbe visualized to ease exploration and analysis tasks. This paper presents a web-based approach that combines\r\nautomatic analysis methods with effective visualization techniques. Different views on the extracted entities\r\nare provided and relations between them across the plot are indicated. Two usage scenarios show successful\r\napplications of the approach and demonstrate its benefits and limitations.",
         "bibtexKey": "noauthororeditor"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/279820e1562a3b7af96d90ebb328ba161/markusjohn",         
         "tags" : [
            "analytics,","document","focus+context","interaction","mining,","myown","techniques,","text","visual","visualization"
         ],
         
         "intraHash" : "79820e1562a3b7af96d90ebb328ba161",
         "interHash" : "d6457d13954d6aefb50c5b7ef91cfb35",
         "label" : "DocuCompass: Effective Exploration of Document Landscapes",
         "user" : "markusjohn",
         "description" : "",
         "date" : "2017-03-06 16:42:45",
         "changeDate" : "2017-03-10 09:39:35",
         "count" : 2,
         "pub-type": "article",
         "journal": "IEEE Transactions on Visualization and Computer Graphics",
         "year": "2017", 
         "url": "", 
         
         "author": [ 
            "Florian Heimerl","Markus John","Qi Han","Steffen Koch","Thomas Ertl"
         ],
         "authors": [
         	
            	{"first" : "Florian",	"last" : "Heimerl"},
            	{"first" : "Markus",	"last" : "John"},
            	{"first" : "Qi",	"last" : "Han"},
            	{"first" : "Steffen",	"last" : "Koch"},
            	{"first" : "Thomas",	"last" : "Ertl"}
         ],
         "abstract": "The creation of interactive visualization to analyze text documents has gained an impressive momentum in recent years.\r\nThis is not surprising in the light of massive and still increasing amounts of available digitized texts.\r\nWebsites, social media, news wire, and digital libraries are just few examples of the diverse text sources whose visual analysis and exploration offers new opportunities to effectively mine and manage the information and knowledge hidden within them.\r\nA popular visualization method for large text collections is to represent each document by a glyph in 2D space.\r\nThese landscapes can be the result of optimizing pairwise distances in 2D to represent document similarities, or they are provided directly as meta data, such as geo-locations.\r\nFor well-defined information needs, suitable interaction methods are available for these spatializations.\r\nHowever, free exploration and navigation on a level of abstraction between a labeled document spatialization and reading single documents is largely unsupported.\r\nAs a result, vital foraging steps for task-tailored actions, such as selecting subgroups of documents for detailed inspection, or subsequent sense-making steps are hampered.\r\nTo fill in this gap, we propose DocuCompass, a focus+context approach based on the lens metaphor.\r\nIt comprises multiple methods to characterize local groups of documents, and to efficiently guide exploration based on users' requirements.\r\nDocuCompass thus allows for effective interactive exploration of document landscapes without disrupting the mental map of users by changing the layout itself.\r\nWe discuss the suitability of multiple navigation and characterization methods for different spatializations and texts.\r\nFinally, we provide insights generated through user feedback and discuss the effectiveness of our approach.",
         "bibtexKey": "noauthororeditor"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2b3bb778cf3609f98508db5321c4a6f3c/thomasrichter",         
         "tags" : [
            "Abdominal;Reproducibility","Compression;Humans;Image","Computed","Computer-Assisted;Observation;ROC","Curve;Data","Curve;Radiography,","Nonparametric;Tomography,","Processing,","Results;Statistics,","Under","X-Ray","analysis;CT;HDR-VDP;JPEG2000","biological","coding;Computed","coding;Medical","coding;medical","coefficients;abdomen;computed","compression;MS-SSIM;PSNR;Spearman","compression;diagnostic","compression;image","correlation","diagnostic","difference","fidelity","image","imaging;Computed","imaging;PSNR;Radiology;Transform","metric;Adult;Area","metrics;multiscale","of","organs;computerised","predictor;image","processing;sensitivity","radiography;image","range","rank","ratio;Abdomen;Biomedical","science;Hospitals;Image","signal-to-noise","similarity;peak","structural","tomography;Computer","tomography;JPEG2000;image","tomography;data","tomography;high-dynamic","visual"
         ],
         
         "intraHash" : "b3bb778cf3609f98508db5321c4a6f3c",
         "interHash" : "39d4c0d08b7efb8cbf0f7a1c55cf1acd",
         "label" : "A Comparison of Three Image Fidelity Metrics of Different Computational Principles for JPEG2000 Compressed Abdomen CT Images",
         "user" : "thomasrichter",
         "description" : "",
         "date" : "2016-03-10 09:18:49",
         "changeDate" : "2016-03-10 08:20:00",
         "count" : 3,
         "pub-type": "article",
         "journal": "Medical Imaging, IEEE Transactions on",
         "year": "2010", 
         "url": "http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5482182", 
         
         "author": [ 
            "Kil Joong Kim","Bohyoung Kim","R. Mantiuk","T. Richter","Hyunna Lee","Heung-Sik Kang","Jinwook Seo","Kyoung Ho Lee"
         ],
         "authors": [
         	
            	{"first" : "Kil Joong",	"last" : "Kim"},
            	{"first" : "Bohyoung",	"last" : "Kim"},
            	{"first" : "R.",	"last" : "Mantiuk"},
            	{"first" : "T.",	"last" : "Richter"},
            	{"first" : "Hyunna",	"last" : "Lee"},
            	{"first" : "Heung-Sik",	"last" : "Kang"},
            	{"first" : "Jinwook",	"last" : "Seo"},
            	{"first" : "Kyoung Ho",	"last" : "Lee"}
         ],
         "volume": "29","number": "8","pages": "1496-1503","abstract": "This study aimed to evaluate three image fidelity metrics of different computational principles-peak signal-to-noise ratio (PSNR), high-dynamic range visual difference predictor (HDR-VDP), and multiscale structural similarity (MS-SSIM)-in measuring the fidelity of JPEG2000 compressed abdomen computed tomography images from a viewpoint of visually lossless compression. Three hundred images with 0.67- or 5-mm section thickness were compressed to one of five compression ratios ranging from reversible compression to 15:1. The fidelity of each compressed image was measured by five radiologists' visual analyses (distinguishable or indistinguishable from the original) and the three metrics. The Spearman rank correlation coefficients of the PSNR, HDR-VDP, and MS-SSIM values with the number of readers responding as indistinguishable were 0.86, 0.94, and 0.86, respectively. Using the pooled readers' responses as the reference standard, the area under the receiver-operating-characteristic curve for the HDR-VDP (0.99) was significantly greater than that for the PSNR (0.95) (p <; 0.001) and for the MS-SSIM (0.96) (p = 0.003), and there was no significant difference between the PSNR and MS-SSIM (p = 0.70). In measuring the image fidelity, the HDR-VDP outperforms the PSNR and MS-SSIM, and the MS-SSIM and PSNR are comparable.",
         "issn" : "0278-0062",
         
         "doi" : "10.1109/TMI.2010.2049655",
         
         "bibtexKey": "kim2010comparison"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2b3bb778cf3609f98508db5321c4a6f3c/rainerreichel",         
         "tags" : [
            "Abdominal;Reproducibility","Compression;Humans;Image","Computed","Computer-Assisted;Observation;ROC","Curve;Data","Curve;Radiography,","Nonparametric;Tomography,","Processing,","Results;Statistics,","Under","X-Ray","analysis;CT;HDR-VDP;JPEG2000","biological","coding;Computed","coding;Medical","coding;medical","coefficients;abdomen;computed","compression;MS-SSIM;PSNR;Spearman","compression;diagnostic","compression;image","correlation","diagnostic","difference","fidelity","image","imaging;Computed","imaging;PSNR;Radiology;Transform","metric;Adult;Area","metrics;multiscale","of","organs;computerised","predictor;image","processing;sensitivity","radiography;image","range","rank","ratio;Abdomen;Biomedical","science;Hospitals;Image","signal-to-noise","similarity;peak","structural","tomography;Computer","tomography;JPEG2000;image","tomography;data","tomography;high-dynamic","visual"
         ],
         
         "intraHash" : "b3bb778cf3609f98508db5321c4a6f3c",
         "interHash" : "39d4c0d08b7efb8cbf0f7a1c55cf1acd",
         "label" : "A Comparison of Three Image Fidelity Metrics of Different Computational Principles for JPEG2000 Compressed Abdomen CT Images",
         "user" : "rainerreichel",
         "description" : "",
         "date" : "2016-03-03 17:45:04",
         "changeDate" : "2016-03-04 09:57:29",
         "count" : 3,
         "pub-type": "article",
         "journal": "Medical Imaging, IEEE Transactions on",
         "year": "2010", 
         "url": "http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5482182", 
         
         "author": [ 
            "Kil Joong Kim","Bohyoung Kim","R. Mantiuk","T. Richter","Hyunna Lee","Heung-Sik Kang","Jinwook Seo","Kyoung Ho Lee"
         ],
         "authors": [
         	
            	{"first" : "Kil Joong",	"last" : "Kim"},
            	{"first" : "Bohyoung",	"last" : "Kim"},
            	{"first" : "R.",	"last" : "Mantiuk"},
            	{"first" : "T.",	"last" : "Richter"},
            	{"first" : "Hyunna",	"last" : "Lee"},
            	{"first" : "Heung-Sik",	"last" : "Kang"},
            	{"first" : "Jinwook",	"last" : "Seo"},
            	{"first" : "Kyoung Ho",	"last" : "Lee"}
         ],
         "volume": "29","number": "8","pages": "1496-1503","abstract": "This study aimed to evaluate three image fidelity metrics of different computational principles-peak signal-to-noise ratio (PSNR), high-dynamic range visual difference predictor (HDR-VDP), and multiscale structural similarity (MS-SSIM)-in measuring the fidelity of JPEG2000 compressed abdomen computed tomography images from a viewpoint of visually lossless compression. Three hundred images with 0.67- or 5-mm section thickness were compressed to one of five compression ratios ranging from reversible compression to 15:1. The fidelity of each compressed image was measured by five radiologists' visual analyses (distinguishable or indistinguishable from the original) and the three metrics. The Spearman rank correlation coefficients of the PSNR, HDR-VDP, and MS-SSIM values with the number of readers responding as indistinguishable were 0.86, 0.94, and 0.86, respectively. Using the pooled readers' responses as the reference standard, the area under the receiver-operating-characteristic curve for the HDR-VDP (0.99) was significantly greater than that for the PSNR (0.95) (p <; 0.001) and for the MS-SSIM (0.96) (p = 0.003), and there was no significant difference between the PSNR and MS-SSIM (p = 0.70). In measuring the image fidelity, the HDR-VDP outperforms the PSNR and MS-SSIM, and the MS-SSIM and PSNR are comparable.",
         "issn" : "0278-0062",
         
         "doi" : "10.1109/TMI.2010.2049655",
         
         "bibtexKey": "5482182"

      }
	  
   ]
}
