
{  
   "types" : {
      "Bookmark" : {
         "pluralLabel" : "Bookmarks"
      },
      "Publication" : {
         "pluralLabel" : "Publications"
      },
      "GoldStandardPublication" : {
         "pluralLabel" : "GoldStandardPublications"
      },
      "GoldStandardBookmark" : {
         "pluralLabel" : "GoldStandardBookmarks"
      },
      "Tag" : {
         "pluralLabel" : "Tags"
      },
      "User" : {
         "pluralLabel" : "Users"
      },
      "Group" : {
         "pluralLabel" : "Groups"
      },
      "Sphere" : {
         "pluralLabel" : "Spheres"
      }
   },
   
   "properties" : {
      "count" : {
         "valueType" : "number"
      },
      "date" : {
         "valueType" : "date"
      },
      "changeDate" : {
         "valueType" : "date"
      },
      "url" : {
         "valueType" : "url"
      },
      "id" : {
         "valueType" : "url"
      },
      "tags" : {
         "valueType" : "item"
      },
      "user" : {
         "valueType" : "item"
      }      
   },
   
   "items" : [
   	  
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/243ef156482932a2c6bf19b98003138f5/rss",         
         "tags" : [
            "rss","myown","models","explainability","language","large","performance","threshold-based"
         ],
         
         "intraHash" : "43ef156482932a2c6bf19b98003138f5",
         "interHash" : "caff878720646430bd1f1b19e87c80bf",
         "label" : "LLM-Based Explainability at Design Time: Detecting Elasticity Antipatterns in Software Architectures",
         "user" : "rss",
         "description" : "",
         "date" : "2026-02-05 17:02:20",
         "changeDate" : "2026-02-05 17:02:20",
         "count" : 2,
         "pub-type": "inproceedings",
         "booktitle": "Software Architecture. ECSA 2025 Tracks and Workshops","publisher":"Springer Nature Switzerland","address":"Cham",
         "year": "2025", 
         "url": "", 
         
         "author": [ 
            "Floriment Klinaku","Jonas Lammert","Steffen Becker"
         ],
         "authors": [
         	
            	{"first" : "Floriment",	"last" : "Klinaku"},
            	{"first" : "Jonas",	"last" : "Lammert"},
            	{"first" : "Steffen",	"last" : "Becker"}
         ],
         
         "editor": [ 
            "Domenico Bianculli","Hassan Sartaj","Vasilios Andrikopoulos","Cesare Pautasso","Tommi Mikkonen","Jennifer Perez","Tomás Bures","Martina De Sanctis","Henry Muccini","Elena Navarro","Mohamed Soliman","Uwe Zdun"
         ],
         "editors": [
         	
            	{"first" : "Domenico",	"last" : "Bianculli"},
            	{"first" : "Hassan",	"last" : "Sartaj"},
            	{"first" : "Vasilios",	"last" : "Andrikopoulos"},
            	{"first" : "Cesare",	"last" : "Pautasso"},
            	{"first" : "Tommi",	"last" : "Mikkonen"},
            	{"first" : "Jennifer",	"last" : "Perez"},
            	{"first" : "Tomás",	"last" : "Bures"},
            	{"first" : "Martina",	"last" : "De Sanctis"},
            	{"first" : "Henry",	"last" : "Muccini"},
            	{"first" : "Elena",	"last" : "Navarro"},
            	{"first" : "Mohamed",	"last" : "Soliman"},
            	{"first" : "Uwe",	"last" : "Zdun"}
         ],
         "pages": "141--154","abstract": "As software architecture grows in complexity, understanding the implications of design decisions becomes increasingly challenging. Large Language Models (LLMs) offer new opportunities for enhancing explainability during architecture modeling and evaluation by generating natural language explanations that support comprehension, learning, and decision-making. This potential is particularly valuable in domains with increased technical complexity---such as elasticity in cloud-based systems. In this work, we integrate and evaluate LLM-based explanations in supporting design-time evaluation of software architectures, focusing on the detection of elasticity antipatterns. Elasticity antipatterns are flawed autoscaling policy configurations that potentially lead to inefficient or unreliable system behavior. We extend an existing modeling and simulation approach with a novel feature that generates contextualized, textual explanations derived from simulation data. These explanations aim to guide architects in understanding scaling behaviors, identifying design issues, and refining their models. Our contribution includes the conceptualization of explanation types relevant to elasticity modeling, the design of prompt templates to elicit effective responses from LLMs, and an evaluation of the generated explanations' usefulness and quality. Results indicate that LLM-assisted feedback enhances the interpretability of elasticity models and supports the early identification of antipatterns, albeit with some limitations in precision and conciseness with only a slight agreement between expert evaluations ($\\kappa$ = 0.202). The explanation quality across types of explanations differs. Even though most explanations contain factual information, a large portion was deemed as imprecise especially in explaining problem and solution, the policy and target and service level objectives.",
         "isbn" : "978-3-032-04403-7",
         
         "bibtexKey": "10.1007/978-3-032-04403-7_14"

      }
	  
   ]
}
