
{  
   "types" : {
      "Bookmark" : {
         "pluralLabel" : "Bookmarks"
      },
      "Publication" : {
         "pluralLabel" : "Publications"
      },
      "GoldStandardPublication" : {
         "pluralLabel" : "GoldStandardPublications"
      },
      "GoldStandardBookmark" : {
         "pluralLabel" : "GoldStandardBookmarks"
      },
      "Tag" : {
         "pluralLabel" : "Tags"
      },
      "User" : {
         "pluralLabel" : "Users"
      },
      "Group" : {
         "pluralLabel" : "Groups"
      },
      "Sphere" : {
         "pluralLabel" : "Spheres"
      }
   },
   
   "properties" : {
      "count" : {
         "valueType" : "number"
      },
      "date" : {
         "valueType" : "date"
      },
      "changeDate" : {
         "valueType" : "date"
      },
      "url" : {
         "valueType" : "url"
      },
      "id" : {
         "valueType" : "url"
      },
      "tags" : {
         "valueType" : "item"
      },
      "user" : {
         "valueType" : "item"
      }      
   },
   
   "items" : [
   	  
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2605591b84797a2b1286f6baff104ab4a/chriskrauter",         
         "tags" : [
            "simtech2-pn7","myown","visus-sedlmair","simtech2","vis(us)","pn7","visus-schmalstieg","exc2075"
         ],
         
         "intraHash" : "605591b84797a2b1286f6baff104ab4a",
         "interHash" : "26b8366361ceedf8ea68152a0c869a35",
         "label" : "Demonstration of VisRing: A Display-Extended Smartring for Nano Visualizations",
         "user" : "chriskrauter",
         "description" : "",
         "date" : "2026-01-16 14:13:18",
         "changeDate" : "2026-01-16 19:05:21",
         "count" : 4,
         "pub-type": "misc",
         "series": "UIST Adjunct","publisher":"ACM","address":"New York, NY, USA",
         "year": "2025", 
         "url": "", 
         
         "author": [ 
            "Runze Liu","Christian Krauter","Taiting Lu","Mara Schulte","Alexander Achberger","Tanja Blascheck","Michael Sedlmair","Mahanth Gowda"
         ],
         "authors": [
         	
            	{"first" : "Runze",	"last" : "Liu"},
            	{"first" : "Christian",	"last" : "Krauter"},
            	{"first" : "Taiting",	"last" : "Lu"},
            	{"first" : "Mara",	"last" : "Schulte"},
            	{"first" : "Alexander",	"last" : "Achberger"},
            	{"first" : "Tanja",	"last" : "Blascheck"},
            	{"first" : "Michael",	"last" : "Sedlmair"},
            	{"first" : "Mahanth",	"last" : "Gowda"}
         ],
         "pages": "1-4",
         "affiliation" : "Krauter, Christian, University of Stuttgart. Achberger, Alexander, Visualisierungsinstitut der Universität Stuttgart. Blascheck, Tanja, Institut für Visualisierung und Interaktive Systeme. Sedlmair, Michael, Visualisierungsinstitut der Universität Stuttgart",
         
         "orcid-numbers" : "Liu, Runze/0000-0003-2342-1644, Krauter, Christian/0000-0002-9787-0816, Lu, Taiting/0000-0002-9695-3142, Schulte, Mara/0009-0000-3575-5575, Blascheck, Tanja/0000-0003-4002-4499, Sedlmair, Michael/0000-0001-7048-9292, Gowda, Mahanth/0000-0001-5325-5013",
         
         "doi" : "10.1145/3746058.3758997",
         
         "bibtexKey": "liu:2025:demonstration"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/23ba680cfe222d0d46c2106fd72df27f4/chriskrauter",         
         "tags" : [
            "simtech2-pn7","myown","simtech","simtech2","exzellenzcluster","dfg","visus","exc2075","visus-sedlmair","vis(us)","pn7","visus-schmalstieg"
         ],
         
         "intraHash" : "3ba680cfe222d0d46c2106fd72df27f4",
         "interHash" : "5c8265c501f13abc2f0017e4eb7a37a0",
         "label" : "VisRing: A Display-Extended Smartring for Nano Visualizations",
         "user" : "chriskrauter",
         "description" : "",
         "date" : "2026-01-16 14:12:23",
         "changeDate" : "2026-01-16 19:05:48",
         "count" : 4,
         "pub-type": "misc",
         "series": "UIST","publisher":"ACM","address":"New York, NY, USA",
         "year": "2025", 
         "url": "", 
         
         "author": [ 
            "Taiting Lu","Christian Krauter","Runze Liu","Mara Schulte","Alexander Achberger","Tanja Blascheck","Michael Sedlmair","Mahanth Gowda"
         ],
         "authors": [
         	
            	{"first" : "Taiting",	"last" : "Lu"},
            	{"first" : "Christian",	"last" : "Krauter"},
            	{"first" : "Runze",	"last" : "Liu"},
            	{"first" : "Mara",	"last" : "Schulte"},
            	{"first" : "Alexander",	"last" : "Achberger"},
            	{"first" : "Tanja",	"last" : "Blascheck"},
            	{"first" : "Michael",	"last" : "Sedlmair"},
            	{"first" : "Mahanth",	"last" : "Gowda"}
         ],
         "pages": "1-18",
         "affiliation" : "Krauter, Christian, University of Stuttgart. Achberger, Alexander, Visualisierungsinstitut der Universität Stuttgart. Blascheck, Tanja, Institut für Visualisierung und Interaktive Systeme. Sedlmair, Michael, Visualisierungsinstitut der Universität Stuttgart",
         
         "orcid-numbers" : "Lu, Taiting/0000-0002-9695-3142, Krauter, Christian/0000-0002-9787-0816, Liu, Runze/0000-0003-2342-1644, Schulte, Mara/0009-0000-3575-5575, Blascheck, Tanja/0000-0003-4002-4499, Sedlmair, Michael/0000-0001-7048-9292, Gowda, Mahanth/0000-0001-5325-5013",
         
         "doi" : "10.1145/3746059.3747806",
         
         "bibtexKey": "lu:2025:visring:"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2a75068f7f5d7acabfd9a73c830acdfae/fairouzgrioui",         
         "tags" : [
            "myown","simtech","vis(us)","pn7","visus:griouifz","exc2075","visus:blaschta"
         ],
         
         "intraHash" : "a75068f7f5d7acabfd9a73c830acdfae",
         "interHash" : "7aeaabbde389284dfe7cdcb5bf8646f8",
         "label" : "Weather Data and Representations: A Survey of Wear OS Apps",
         "user" : "fairouzgrioui",
         "description" : "",
         "date" : "2024-10-29 15:15:39",
         "changeDate" : "2024-10-29 15:15:39",
         "count" : 3,
         "pub-type": "inproceedings",
         
         "year": "2024", 
         "url": "https://diglib.eg.org/items/e142d8ae-2549-4699-908d-b8440f458541", 
         
         "author": [ 
            "Jakob Rohwer","Fairouz Grioui","Tanja Blascheck"
         ],
         "authors": [
         	
            	{"first" : "Jakob",	"last" : "Rohwer"},
            	{"first" : "Fairouz",	"last" : "Grioui"},
            	{"first" : "Tanja",	"last" : "Blascheck"}
         ],
         
         "doi" : "10.2312/evp.20241098",
         
         "bibtexKey": "rohwer2024weather"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/27d261e9203f66b7f183a2a0293dadfe3/fairouzgrioui",         
         "tags" : [
            "myown","simtech","vis(us)","pn7","visus:griouifz","exc2075","visus:blaschta"
         ],
         
         "intraHash" : "7d261e9203f66b7f183a2a0293dadfe3",
         "interHash" : "d1ae917d9e191e3ae84661934c0d38d5",
         "label" : "Micro Visualizations on a Smartwatch: Assessing Reading Performance While Walking",
         "user" : "fairouzgrioui",
         "description" : "",
         "date" : "2024-10-29 15:11:06",
         "changeDate" : "2024-10-29 15:11:06",
         "count" : 2,
         "pub-type": "inproceedings",
         
         "year": "2024", 
         "url": "https://arxiv.org/abs/2407.17893", 
         
         "author": [ 
            "Fairouz Grioui","Tanja Blascheck","Lijie Yao","Petra Isenberg"
         ],
         "authors": [
         	
            	{"first" : "Fairouz",	"last" : "Grioui"},
            	{"first" : "Tanja",	"last" : "Blascheck"},
            	{"first" : "Lijie",	"last" : "Yao"},
            	{"first" : "Petra",	"last" : "Isenberg"}
         ],
         
         "bibtexKey": "griouimicro"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2f91e401eab2ece4d9187b08d4a53ef39/fairouzgrioui",         
         "tags" : [
            "simtech","vis(us)","pn7","visus:griouifz","exc2075","visus:blaschta"
         ],
         
         "intraHash" : "f91e401eab2ece4d9187b08d4a53ef39",
         "interHash" : "ff9115f6fdff10a823799a23fe9467f9",
         "label" : "Personal Mobile Devices to Assist with Wrist Rehabilitation at Home",
         "user" : "fairouzgrioui",
         "description" : "",
         "date" : "2024-10-29 15:06:52",
         "changeDate" : "2024-10-29 15:06:52",
         "count" : 1,
         "pub-type": "inproceedings",
         
         "year": "2024", 
         "url": "https://diglib.eg.org/items/20d19dd8-3313-49ff-911f-71cb25bdca63", 
         
         "author": [ 
            "Fairouz Grioui","Pantelis Antoniadis","Xingyao Yu","Tanja Blascheck"
         ],
         "authors": [
         	
            	{"first" : "Fairouz",	"last" : "Grioui"},
            	{"first" : "Pantelis",	"last" : "Antoniadis"},
            	{"first" : "Xingyao",	"last" : "Yu"},
            	{"first" : "Tanja",	"last" : "Blascheck"}
         ],
         
         "doi" : "10.2312/evp.20241090",
         
         "bibtexKey": "noauthororeditor2024personal"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2106afe71db48420fb0427e965db64b73/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","visus"
         ],
         
         "intraHash" : "106afe71db48420fb0427e965db64b73",
         "interHash" : "990b4793043d16fcfd50983d0172b5a8",
         "label" : "VisME software v1.2",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2024-07-16 00:28:44",
         "changeDate" : "2024-07-16 18:47:27",
         "count" : 5,
         "pub-type": "misc",
         "publisher":"Zenodo",
         "year": "2019", 
         "url": "https://zenodo.org/record/3352236", 
         
         "author": [ 
            "Tanja Munz"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz"}
         ],
         
         "copyright" : "Creative Commons Attribution 4.0 International",
         
         "doi" : "10.5281/ZENODO.3352236",
         
         "bibtexKey": "munz2019visme"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2096de0b9c50dde009cdaee2197b3fa82/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","visus"
         ],
         
         "intraHash" : "096de0b9c50dde009cdaee2197b3fa82",
         "interHash" : "9b847b4ad1d4947d2faebea4e35eeb56",
         "label" : "NMTVis - Neural Machine Translation Visualization System",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2024-07-16 00:26:43",
         "changeDate" : "2024-07-16 19:14:21",
         "count" : 6,
         "pub-type": "misc",
         "publisher":"DaRUS",
         "year": "2021", 
         "url": "https://darus.uni-stuttgart.de/citation?persistentId=doi:10.18419/darus-1849", 
         
         "author": [ 
            "Tanja Munz","Dirk Väth","Paul Kuznecov","Ngoc Thang Vu","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz"},
            	{"first" : "Dirk",	"last" : "Väth"},
            	{"first" : "Paul",	"last" : "Kuznecov"},
            	{"first" : "Ngoc Thang",	"last" : "Vu"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "note": "Related to: T. Munz, D. Väth, P. Kuznecov, N. T. Vu, and D. Weiskopf. \"Visual-Interactive Neural Machine Translation\". Graphics Interface. 2021","abstract": "NMTVis is a web-based visual analytics system to analyze, understand, and correct translations generated with neural machine translation. First, a document can be translated using a neural machine translation model (we support an LSTM-based and the Transformer architecture). Afterward, users can find mistranslated sentences, explore and correct these sentences and retrain the model to generate a better translation for the whole document. Our approach targets the correction of domain-specific documents.",
         "doi" : "10.18419/DARUS-1849",
         
         "bibtexKey": "munz2021nmtvis"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/23b4900c8003b32e95bfcc45a3bb26923/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","visus"
         ],
         
         "intraHash" : "3b4900c8003b32e95bfcc45a3bb26923",
         "interHash" : "464a6cccfb871979549b29ba4c55dd3c",
         "label" : "Exploring visual quality of multidimensional time series projections",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2024-05-28 11:11:18",
         "changeDate" : "2024-05-28 11:11:18",
         "count" : 6,
         "pub-type": "article",
         "journal": "Visual Informatics",
         "year": "2024", 
         "url": "", 
         
         "author": [ 
            "Tanja Munz-Körner","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz-Körner"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         
         "doi" : "https://doi.org/10.1016/j.visinf.2024.04.004",
         
         "bibtexKey": "visualQuality2024"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2bfc58e342371818ab646ed01e0cbed02/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","visus"
         ],
         
         "intraHash" : "bfc58e342371818ab646ed01e0cbed02",
         "interHash" : "5613d7f9aaa4a66ef45118bf9a45a74d",
         "label" : "Visual Analysis System to Explore the Visual Quality of Multidimensional Time Series Projections",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2024-05-28 11:00:04",
         "changeDate" : "2024-07-16 18:53:24",
         "count" : 6,
         "pub-type": "misc",
         "publisher":"DaRUS",
         "year": "2024", 
         "url": "https://darus.uni-stuttgart.de/citation?persistentId=doi:10.18419/darus-3553", 
         
         "author": [ 
            "Tanja Munz-Körner","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz-Körner"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "note": "Related to: T. Munz-Körner, D. Weiskopf, Exploring visual quality of multidimensional time series projections, Visual Informatics (2024). doi: 10.1016/j.visinf.2024.04.004","abstract": "Source code of our visual analysis system for the exploration of the visual quality of multidimensional time series projections.This project contains source code for preprocessing data and the visual analysis system. Additionally, we added precomputed data for immediate use in the visual analysis system.Our project contains the following directories/files of interest: datasets: Data sets for the use with our visual analysis system. The data can also be generated with the data preparation scripts. static, templates, and dimRed: Java script / Python code of our visualization approach. run_windows: Scripts to run our system on windows. run_linux: Scripts to run our system on linux. datasets.txt: List of directories used in preprocessing and for the visualization.Please have a look at the README file for more details.",
         "doi" : "10.18419/DARUS-3553",
         
         "bibtexKey": "munz2024quality"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2bd80321764fb839f2a0a022ca9334a15/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","visus"
         ],
         
         "intraHash" : "bd80321764fb839f2a0a022ca9334a15",
         "interHash" : "ed70500a2d403b1b61f6a517907a0517",
         "label" : "Supplemental Material for \"Exploring Visual Quality of Multidimensional Time Series Projections\"",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2024-05-28 10:57:38",
         "changeDate" : "2024-07-16 18:55:52",
         "count" : 6,
         "pub-type": "misc",
         "publisher":"DaRUS",
         "year": "2024", 
         "url": "https://darus.uni-stuttgart.de/citation?persistentId=doi:10.18419/darus-3965", 
         
         "author": [ 
            "Tanja Munz-Körner","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz-Körner"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "note": "Related to: T. Munz-Körner, D. Weiskopf, Exploring visual quality of multidimensional time series projections, Visual Informatics (2024). doi: 10.1016/j.visinf.2024.04.004","abstract": "Supplemental material for our paper \"Exploring visual quality of multidimensional time series projections\": A video demonstrating the interactive use of our exploration system. A table containing publications using dimensionality reduction on multidimensional time series to project them to 2D for visualization and exploration. A file containing images illustrating the data used in section 5.2. (Simulation Data: Kármán Vortex Street) and section 5.3. (Real Footage: Hurricane Dorian Timelapse).  A video illustrating the data used in section 5.2. (Simulation Data: Kármán Vortex Street).",
         "doi" : "10.18419/DARUS-3965",
         
         "bibtexKey": "munz2024supplemental"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/21a5a2f9c38c1681813ec4543f0020088/frankheyen",         
         "tags" : [
            "myown","sfbtrr161","visus:ngoqh","visus:heyenfk","2023","cybervalley","peerreviewed","visus","vis(us)","visus:sedlmaml"
         ],
         
         "intraHash" : "1a5a2f9c38c1681813ec4543f0020088",
         "interHash" : "1380e23419fe4dd89f3a9f38d4831cd3",
         "label" : "Visual Overviews for Sheet Music Structure",
         "user" : "frankheyen",
         "description" : "More materials here: https://ismir2023program.ismir.net/poster_216.html\r\nand here: https://visvar.github.io/pub/heyen2023visual.html",
         "date" : "2024-01-05 17:11:56",
         "changeDate" : "2024-01-05 17:11:56",
         "count" : 10,
         "pub-type": "inproceedings",
         "booktitle": "Proceedings of the 24th International Society for Music Information Retrieval Conference (ISMIR)","publisher":"ISMIR",
         "year": "2023", 
         "url": "https://zenodo.org/doi/10.5281/zenodo.10265383", 
         
         "author": [ 
            "Frank Heyen","Quynh Quang Ngo","Michael Sedlmair"
         ],
         "authors": [
         	
            	{"first" : "Frank",	"last" : "Heyen"},
            	{"first" : "Quynh Quang",	"last" : "Ngo"},
            	{"first" : "Michael",	"last" : "Sedlmair"}
         ],
         "pages": "692-699","abstract": "We propose different methods for alternative representation and visual augmentation of sheet music that help users gain an overview of general structure, repeating patterns, and the similarity of segments. To this end, we explored mapping the overall similarity between sections or bars to colors. For these mappings, we use dimensionality reduction or clustering to assign similar segments to similar colors and vice versa. To provide a better overview, we further designed simplified music notation representations, including hierarchical and compressed encodings. These overviews allow users to display whole pieces more compactly on a single screen without clutter and to find and navigate to distant segments more quickly. Our preliminary evaluation with guitarists and tablature shows that our design supports users in tasks such as analyzing structure, finding repetitions, and determining the similarity of specific segments to others.",
         "venue" : "Milan, Italy",
         
         "copyright" : "Creative Commons Attribution 4.0 International",
         
         "doi" : "10.5281/ZENODO.10265383",
         
         "bibtexKey": "https://doi.org/10.5281/zenodo.10265383"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/24879d85b7fa8ecabd625e0b00e178a48/msedlmair",         
         "tags" : [
            "myown","simtech","vis(us)","visus:sedlmaml","exc2075","exc2075(from2019)"
         ],
         
         "intraHash" : "4879d85b7fa8ecabd625e0b00e178a48",
         "interHash" : "dc36cdc101c0105f6b4c40b65d985d81",
         "label" : "cARdLearner: Using Expressive Virtual Agents when Learning Vocabulary in Augmented Reality",
         "user" : "msedlmair",
         "description" : "",
         "date" : "2023-11-21 17:34:06",
         "changeDate" : "2023-11-21 17:34:06",
         "count" : 11,
         "pub-type": "inproceedings",
         "booktitle": "ACM Conference on Human Factors in Computing Systems Extended Abstracts (CHI-EA))",
         "year": "2022", 
         "url": "https://doi.org/10.1145/3491101.3519631", 
         
         "author": [ 
            "Aimee Sousa Calepso","Natalie Hube","Noah Berenguel Senn","Vincent Brandt","Michael Sedlmair"
         ],
         "authors": [
         	
            	{"first" : "Aimee",	"last" : "Sousa Calepso"},
            	{"first" : "Natalie",	"last" : "Hube"},
            	{"first" : "Noah",	"last" : "Berenguel Senn"},
            	{"first" : "Vincent",	"last" : "Brandt"},
            	{"first" : "Michael",	"last" : "Sedlmair"}
         ],
         
         "isbn" : "9781450391566",
         
         "numpages" : "6",
         
         "articleno" : "245",
         
         "location" : "New Orleans, LA, USA",
         
         "doi" : "10.1145/3491101.3519631",
         
         "bibtexKey": "calepso2022cardlearner"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2c9efd9e4eddc4db6dc9aaf808b170f2a/msedlmair",         
         "tags" : [
            "myown","simtech","vis(us)","visus:sedlmaml","exc2075","exc2075(from2019)"
         ],
         
         "intraHash" : "c9efd9e4eddc4db6dc9aaf808b170f2a",
         "interHash" : "642477c3aabc971a9d5db8bf800f1fc8",
         "label" : "AR Hero: Generating Interactive Augmented Reality Guitar Tutorials",
         "user" : "msedlmair",
         "description" : "",
         "date" : "2023-11-21 17:26:56",
         "changeDate" : "2023-11-21 17:26:56",
         "count" : 11,
         "pub-type": "inproceedings",
         "booktitle": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
         "year": "2022", 
         "url": "", 
         
         "author": [ 
            "Lucchas Ribeiro Skreinig","Ana Stanescu","Shohei Mori","Frank Heyen","Peter Mohr","Michael Sedlmair","Dieter Schmalstieg","Denis Kalkofen"
         ],
         "authors": [
         	
            	{"first" : "Lucchas Ribeiro",	"last" : "Skreinig"},
            	{"first" : "Ana",	"last" : "Stanescu"},
            	{"first" : "Shohei",	"last" : "Mori"},
            	{"first" : "Frank",	"last" : "Heyen"},
            	{"first" : "Peter",	"last" : "Mohr"},
            	{"first" : "Michael",	"last" : "Sedlmair"},
            	{"first" : "Dieter",	"last" : "Schmalstieg"},
            	{"first" : "Denis",	"last" : "Kalkofen"}
         ],
         "pages": "395-401","abstract": "We introduce a system capable of generating interactive Aug-mented Reality guitar tutorials by parsing common digital guitar tablature and by capturing the performance of an expert using a multi-camera array. Instructions are presented to the user in an Augmented Reality application using either an abstract visualization, a 3D virtual hand, or a 3D video. To support individual users at different skill levels the system provides full control of the play-back of a tutorial, including its speed and looping behavior, while delivering live feedback on the user's performance.",
         "doi" : "10.1109/VRW55335.2022.00086",
         
         "bibtexKey": "9757565"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2b8da417e8aaf9d7f9cc4030b1054fd94/msedlmair",         
         "tags" : [
            "myown","simtech","vis(us)","visus:sedlmaml","EXC2075","exc2075","exc2075(from2019)"
         ],
         
         "intraHash" : "b8da417e8aaf9d7f9cc4030b1054fd94",
         "interHash" : "c491444b6f8fdccd47fc036bd0601481",
         "label" : "Using Expressive Avatars to Increase Emotion Recognition: A Pilot Study",
         "user" : "msedlmair",
         "description" : "",
         "date" : "2023-11-21 17:10:46",
         "changeDate" : "2023-11-21 17:10:46",
         "count" : 6,
         "pub-type": "inproceedings",
         "booktitle": "Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems","series": "CHI EA '22","publisher":"Association for Computing Machinery","address":"New York, NY, USA",
         "year": "2022", 
         "url": "https://doi.org/10.1145/3491101.3519822", 
         
         "author": [ 
            "Natalie Hube","Kresimir Vidackovic","Michael Sedlmair"
         ],
         "authors": [
         	
            	{"first" : "Natalie",	"last" : "Hube"},
            	{"first" : "Kresimir",	"last" : "Vidackovic"},
            	{"first" : "Michael",	"last" : "Sedlmair"}
         ],
         "pages": "1\u20137","abstract": "Virtual avatars are widely used for collaborating in virtual environments. Yet, often these avatars lack expressiveness to determine a state of mind. Prior work has demonstrated effective usage of determining emotions and animated lip movement through analyzing mere audio tracks of spoken words. To provide this information on a virtual avatar, we created a natural audio data set consisting of 17 audio files from which we then extracted the underlying emotion and lip movement. To conduct a pilot study, we developed a prototypical system that displays the extracted visual parameters and then maps them on a virtual avatar while playing the corresponding audio file. We tested the system with 5 participants in two conditions: (i) while seeing the virtual avatar only an audio file was played. (ii) In addition to the audio file, the extracted facial visual parameters were displayed on the virtual avatar. Our results suggest the validity of using additional visual parameters in the avatars\u2019 face as it helps to determine emotions. We conclude with a brief discussion on the outcomes and their implications on future work.",
         "isbn" : "9781450391566",
         
         "location" : "New Orleans, LA, USA",
         
         "doi" : "10.1145/3491101.3519822",
         
         "bibtexKey": "Hube2022"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/245f80081761d7a4507e7f2b210b79768/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:kuenzesn","visus:munzta","vis(us)","pn6","visus","exc2075"
         ],
         
         "intraHash" : "45f80081761d7a4507e7f2b210b79768",
         "interHash" : "e858fa98d08c5e0e3d2b4baefa5bd60e",
         "label" : "Visual Analysis of Scene-Graph-Based Visual Question Answering",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2023-10-23 19:14:40",
         "changeDate" : "2023-10-23 19:14:40",
         "count" : 10,
         "pub-type": "inproceedings",
         "booktitle": "Proceedings of the 16th International Symposium on Visual Information Communication and Interaction","series": "VINCI '23","publisher":"Association for Computing Machinery","address":"New York, NY, USA",
         "year": "2023", 
         "url": "https://doi.org/10.1145/3615522.3615547", 
         
         "author": [ 
            "Noel Schäfer","Sebastian Künzel","Tanja Munz-Körner","Pascal Tilli","Sandeep Vidyapu","Ngoc Thang Vu","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Noel",	"last" : "Schäfer"},
            	{"first" : "Sebastian",	"last" : "Künzel"},
            	{"first" : "Tanja",	"last" : "Munz-Körner"},
            	{"first" : "Pascal",	"last" : "Tilli"},
            	{"first" : "Sandeep",	"last" : "Vidyapu"},
            	{"first" : "Ngoc",	"last" : "Thang Vu"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "pages": "1\u20138","abstract": "Scene-graph-based Visual Question Answering (VQA) has emerged as a burgeoning field in Deep Learning research, with a growing demand for robust and interpretable VQA systems. In this paper, we present a novel visual analysis approach that addresses two critical objectives in VQA: identifying and correcting prediction issues and providing insights into model decision-making processes through visualizing internal information. Our approach builds on the GraphVQA framework, which uses graph neural networks to process scene graphs representing images and which was trained on the widely-used GQA dataset. Our analysis tool aims at users familiar with the basics of graph-based VQA. By leveraging query-based scene analysis and visualization of crucial internal states, we are able to detect and pinpoint reasons for inaccurate predictions, facilitating model refinement and dataset curation. Identifying expressive internal states is a challenge. Through rigorous computer-based evaluations and presentation of a use case, we demonstrate the effectiveness of our analysis tool and model state visualization.",
         "isbn" : "9798400707513",
         
         "location" : "Guangzhou, China",
         
         "doi" : "10.1145/3615522.3615547",
         
         "bibtexKey": "Schaefer2023"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2191e8966ded5955274b1c1d03f00077c/weiskopf",         
         "tags" : [
            "myown","visus:weiskopf","vis(us)","visus"
         ],
         
         "intraHash" : "191e8966ded5955274b1c1d03f00077c",
         "interHash" : "8b8e1d14193a0955bb165740aa3075d0",
         "label" : "Philosophy of Action and Its Relationship to\r\nInteractive Visualisation and Molière\u2019s theatre",
         "user" : "weiskopf",
         "description" : "",
         "date" : "2023-10-16 18:15:48",
         "changeDate" : "2023-10-16 18:15:48",
         "count" : 7,
         "pub-type": "article",
         "journal": "Comparatio",
         "year": "2023", 
         "url": "", 
         
         "author": [ 
            "Daniel M. Feige","Daniel Weiskopf","Kirsten Dickhaut"
         ],
         "authors": [
         	
            	{"first" : "Daniel M.",	"last" : "Feige"},
            	{"first" : "Daniel",	"last" : "Weiskopf"},
            	{"first" : "Kirsten",	"last" : "Dickhaut"}
         ],
         
         "editor": [ 
            "Linda Simonis","Annette Simonis","Kirsten Dickhaut"
         ],
         "editors": [
         	
            	{"first" : "Linda",	"last" : "Simonis"},
            	{"first" : "Annette",	"last" : "Simonis"},
            	{"first" : "Kirsten",	"last" : "Dickhaut"}
         ],
         "volume": "15","number": "1","pages": "75-86",
         "bibtexKey": "feige2023philosophy"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/26cca2701ae2212a10884637e160eb7ac/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","PN6","EXC2075","visus"
         ],
         
         "intraHash" : "6cca2701ae2212a10884637e160eb7ac",
         "interHash" : "8ffcd5ba63c57cbd84dc4174f6bb2021",
         "label" : "NMTVis - Trained Models for our Visual Analytics System",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2023-10-04 17:35:47",
         "changeDate" : "2023-10-04 17:35:47",
         "count" : 7,
         "pub-type": "misc",
         "publisher":"DaRUS",
         "year": "2021", 
         "url": "https://darus.uni-stuttgart.de/citation?persistentId=doi:10.18419/darus-1850", 
         
         "author": [ 
            "Tanja Munz","Dirk Väth","Paul Kuznecov","Ngoc Thang Vu","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz"},
            	{"first" : "Dirk",	"last" : "Väth"},
            	{"first" : "Paul",	"last" : "Kuznecov"},
            	{"first" : "Ngoc Thang",	"last" : "Vu"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         
         "doi" : "10.18419/DARUS-1850",
         
         "bibtexKey": "https://doi.org/10.18419/darus-1850"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/297c16a709b140b9f490d8a254107e02f/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","PN6","EXC2075","visus"
         ],
         
         "intraHash" : "97c16a709b140b9f490d8a254107e02f",
         "interHash" : "2ca9d88c72f0d61dcd18e560d7385e62",
         "label" : "NMTVis - Extended Neural Machine Translation Visualization System",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2023-10-04 17:33:52",
         "changeDate" : "2023-10-04 17:33:52",
         "count" : 6,
         "pub-type": "misc",
         
         "year": "2022", 
         "url": "", 
         
         "author": [ 
            "Tanja Munz","Dirk Väth","Paul Kuznecov","Ngoc Thang Vu","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz"},
            	{"first" : "Dirk",	"last" : "Väth"},
            	{"first" : "Paul",	"last" : "Kuznecov"},
            	{"first" : "Ngoc Thang",	"last" : "Vu"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "note": "Related to: T. Munz, D. Väth, P. Kuznecov, N. T. Vu, and D. Weiskopf. \"Visualization-based improvement of neural machine translation\", Computers & Graphics, 2021. doi: 10.1016/j.cag.2021.12.003","abstract": "NMTVis is a web-based visual analytics system to analyze, understand, and correct translations generated with neural machine translation. First, a document can be translated using a neural machine translation model (we support an LSTM-based and the Transformer architecture). Afterward, users can find mistranslated sentences, explore and correct these sentences and retrain the model to generate a better translation for the whole document. Our approach targets the correction of domain-specific documents. This extended version of our visual analytics system provides additional visualization and interaction techniques as well as scripts for computer-based evaluation of our approach. You can find important information about our system here and an introduction to our system here.",
         "affiliation" : "Munz, Tanja/University of Stuttgart, Väth, Dirk/University of Stuttgart, Kuznecov, Paul/University of Stuttgart, Vu, Ngoc Thang/University of Stuttgart, Weiskopf, Daniel/University of Stuttgart",
         
         "orcid-numbers" : "Munz, Tanja/0000-0003-3960-3290, Weiskopf, Daniel/0000-0003-1174-1026",
         
         "doi" : "10.18419/darus-2124",
         
         "bibtexKey": "munz2022nmtvis"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2cc11065a707047b43139817722cc519f/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","visus"
         ],
         
         "intraHash" : "cc11065a707047b43139817722cc519f",
         "interHash" : "700a9373ea538d40a5f9b6e934af1f5f",
         "label" : "Visual Analytics System for Hidden States in Recurrent Neural Networks",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2023-10-04 17:32:48",
         "changeDate" : "2024-07-16 18:57:46",
         "count" : 6,
         "pub-type": "misc",
         "publisher":"DaRUS",
         "year": "2021", 
         "url": "https://darus.uni-stuttgart.de/citation?persistentId=doi:10.18419/darus-2052", 
         
         "author": [ 
            "Tanja Munz","Rafael Garcia","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Tanja",	"last" : "Munz"},
            	{"first" : "Rafael",	"last" : "Garcia"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "note": "Related to: R. Garcia, T. Munz, and D. Weiskopf. \"Visual Analytics Tool for the Interpretation of Hidden States in Recurrent Neural Networks\". Visual Computing for Industry, Biomedicine, and Art (VCIBA). 2021. doi: 10.1186/s42492-021-00090-0","abstract": "Source code of our visual analytics system for the interpretation of hidden states in recurrent neural networks. This project contains source code for preprocessing data and the visual analytics system. Additionally, we added precomputed data for immediate use in the visual analysis system.  The sub directories contain the following:      dataPreparation: Python scripts to prepare data for analysis. In these scripts, Long Short-Term Memory (LSTM) models are trained and data for our visual analytics system is exported.    visualAnalytics: The source code of our visual analytics system to explore hidden states.     demonstrationData: Data files for the use with our visual analytics system. The same data can also be generated with the data preparation scripts.   We provide two scripts to generate data for analysis in our visual analytics system: for the IMDB and Reuters dataset as available in Keras. The output files can then be loaded into our visual analytics system; their locations have to be specified in userData.toml of the visual analytics system.   The output file of our data preparation scripts or the ones provided for demonstration can be loaded in our visual analytics system for visualization and analysis. Since we provide input files, you do not have to run the preprocessing steps and can use our visual analytics system immediately.   Please have a look at the respective README-files for more details.",
         "doi" : "10.18419/DARUS-2052",
         
         "bibtexKey": "https://doi.org/10.18419/darus-2052"

      }
,
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/27a0b2decd921d15add6cf42e733fe3e4/tanjamunz",         
         "tags" : [
            "myown","visus:weiskopf","visus:munzta","vis(us)","PN6","EXC2075","visus"
         ],
         
         "intraHash" : "7a0b2decd921d15add6cf42e733fe3e4",
         "interHash" : "1d114b870579095267e6815b0c43d9ff",
         "label" : "Visual Analysis System for Scene-Graph-Based Visual Question Answering",
         "user" : "tanjamunz",
         "description" : "",
         "date" : "2023-10-04 17:31:48",
         "changeDate" : "2023-10-04 17:31:48",
         "count" : 6,
         "pub-type": "misc",
         
         "year": "2023", 
         "url": "", 
         
         "author": [ 
            "Noel Schäfer","Pascal Tilli","Tanja Munz-Körner","Sebastian Künzel","Sandeep Vidyapu","Ngoc Thang Vu","Daniel Weiskopf"
         ],
         "authors": [
         	
            	{"first" : "Noel",	"last" : "Schäfer"},
            	{"first" : "Pascal",	"last" : "Tilli"},
            	{"first" : "Tanja",	"last" : "Munz-Körner"},
            	{"first" : "Sebastian",	"last" : "Künzel"},
            	{"first" : "Sandeep",	"last" : "Vidyapu"},
            	{"first" : "Ngoc Thang",	"last" : "Vu"},
            	{"first" : "Daniel",	"last" : "Weiskopf"}
         ],
         "note": "Related to: N. Schäfer, S. Künzel, T. Munz, P. Tilli, N. T. Vu, and D. Weiskopf. Visual Analysis of Scene-Graph-Based Visual Question Answering. Proceedings of the 16th International Symposium on Visual Information Communication and Interaction (VINCI 2023). 2023","abstract": "Source code of our visual analysis system to explore scene-graph-based visual question answering. This approach is built on top of the state-of-the-art GraphVQA framework which was trained on the GQA dataset. Instructions on how to use our system can be found in the README.",
         "affiliation" : "Schäfer, Noel/University of Stuttgart, Tilli, Pascal/University of Stuttgart, Munz-Körner, Tanja/University of Stuttgart, Künzel, Sebastian/University of Stuttgart, Vidyapu, Sandeep/University of Stuttgart, Vu, Ngoc Thang/University of Stuttgart, Weiskopf, Daniel/University of Stuttgart",
         
         "orcid-numbers" : "Munz-Körner, Tanja/0000-0003-3960-3290, Künzel, Sebastian/0009-0001-0799-4293, Vidyapu, Sandeep/0000-0003-3595-5221, Vu, Ngoc Thang/0000-0001-7893-9147, Weiskopf, Daniel/0000-0003-1174-1026",
         
         "doi" : "10.18419/darus-3589",
         
         "bibtexKey": "schafer2023visual"

      }
	  
   ]
}
