
{  
   "types" : {
      "Bookmark" : {
         "pluralLabel" : "Bookmarks"
      },
      "Publication" : {
         "pluralLabel" : "Publications"
      },
      "GoldStandardPublication" : {
         "pluralLabel" : "GoldStandardPublications"
      },
      "GoldStandardBookmark" : {
         "pluralLabel" : "GoldStandardBookmarks"
      },
      "Tag" : {
         "pluralLabel" : "Tags"
      },
      "User" : {
         "pluralLabel" : "Users"
      },
      "Group" : {
         "pluralLabel" : "Groups"
      },
      "Sphere" : {
         "pluralLabel" : "Spheres"
      }
   },
   
   "properties" : {
      "count" : {
         "valueType" : "number"
      },
      "date" : {
         "valueType" : "date"
      },
      "changeDate" : {
         "valueType" : "date"
      },
      "url" : {
         "valueType" : "url"
      },
      "id" : {
         "valueType" : "url"
      },
      "tags" : {
         "valueType" : "item"
      },
      "user" : {
         "valueType" : "item"
      }      
   },
   
   "items" : [
   	  
      {
         "type" : "Bookmark",
         "id"   : "https://puma.ub.uni-stuttgart.de/url/6536b422328060d3ecb739ed14133336/diglezakis",
         "tags" : [
            "artificialIntelligence","tools","llm"
         ],
         
         "intraHash" : "6536b422328060d3ecb739ed14133336",
         "label" : "Ollama",
         "user" : "diglezakis",
         "description" : "\r\nGet up and running with large language models.\r\nRun Llama 3.3, Phi 4, Mistral, Gemma 2, and other models. Customize and create your own.",
         "date" : "2025-01-24 16:07:20",
         "changeDate" : "2025-01-24 16:07:20",
         "count" : 1,
         "url" : "https://ollama.com/"

      },
      {
         "type" : "Publication",
         "id"   : "https://puma.ub.uni-stuttgart.de/bibtex/2993ffe08eb3813d0a25b30b88a2d8b0f/diglezakis",         
         "tags" : [
            "metadata","ontologie","artificialIntelligence","llm"
         ],
         
         "intraHash" : "993ffe08eb3813d0a25b30b88a2d8b0f",
         "interHash" : "d2f87ced0951cd393afba2fc4f3b7bb9",
         "label" : "Increasing the LLM Accuracy for Question Answering: Ontologies to the Rescue!",
         "user" : "diglezakis",
         "description" : "[2405.11706] Increasing the LLM Accuracy for Question Answering: Ontologies to the Rescue!",
         "date" : "2024-11-18 15:33:26",
         "changeDate" : "2024-11-18 15:33:26",
         "count" : 1,
         "pub-type": "misc",
         
         "year": "2024", 
         "url": "https://arxiv.org/abs/2405.11706", 
         
         "author": [ 
            "Dean Allemang","Juan Sequeda"
         ],
         "authors": [
         	
            {"first" : "Dean", "last" : "Allemang"},
            {"first" : "Juan", "last" : "Sequeda"}
         ],
         
         "eprint" : "2405.11706",
         
         "archiveprefix" : "arXiv",
         
         "primaryclass" : "cs.AI",
         
         "bibtexKey": "allemang2024increasingllmaccuracyquestion"

      }
	  
   ]
}
