Commit f56221a

Merge pull request #8 from AKSW/dev
added some missing urls and other fields, updated Meyer 2024 ChatGPT Experiments
2 parents 1a501e3 + e631dec

aksw.bib

Lines changed: 33 additions & 12 deletions
@@ -12411,6 +12411,10 @@ @InProceedings{Dziwis2022OntoflowUserFriendly
   booktitle = {Proceedings of International Workshop on Semantic Industrial Information Modelling (SemIIM) @ ESWC22},
   title = {Ontoflow: A User-Friendly Ontology Development Workflow},
   year = {2022},
+  series = {CEUR Workshop Proceedings},
+  volume = {3355},
+  abstract = {For many years, the development of widely applicable and high-quality ontologies has been an ongoing research topic. Among the various challenges, the lack of integrated development environments for non-technical domain experts has been one of the most pressing research issues. But while the participation of domain experts is vital for the applicability of ontologies, there are hardly any software tools available that facilitate their active engagement. We present a solution that addresses this research gap by automating the ontology development process with the help of a workflow engine. We define a pipeline that facilitates ontology implementation, serialization, documentation and testing within the scope of a seamless automatic routine that can be easily set up by the ontology engineer and triggered by a non-technical domain expert. Thus, the processing pipeline takes care of most of the operations that usually have to be carried out by an ontology or software engineer. We demonstrate the applicability of the approach by developing an ontology with OntoFlow and validating its functioning with a large-scale ontology dataset from Linked Open Vocabularies (LOV).},
+  issn = {1613-0073},
   keywords = {group_aksw sys:relevantFor:infai es wenige lpmeyer martin},
   url = {https://ceur-ws.org/Vol-3355/ontoflow.pdf},
 }
@@ -12420,10 +12424,13 @@ @InProceedings{Frey2023BenchmarkingAbilitiesLarge
   booktitle = {Proceedings of Workshop Deep Learning for Knowledge Graphs (DL4KG) @ ISWC23},
   title = {Benchmarking the Abilities of Large Language Models for RDF Knowledge Graph Creation and Comprehension: How Well Do LLMs Speak Turtle?},
   year = {2023},
+  series = {CEUR Workshop Proceedings},
+  volume = {3559},
   abstract = {Large Language Models (LLMs) are advancing at a rapid pace, with significant improvements at natural language processing and coding tasks. Yet, their ability to work with formal languages representing data, specifically within the realm of knowledge graph engineering, remains under-investigated. To evaluate the proficiency of various LLMs, we created a set of five tasks that probe their ability to parse, understand, analyze, and create knowledge graphs serialized in Turtle syntax. These tasks, each embodying distinct degrees of complexity and being able to scale with the size of the problem, have been integrated into our automated evaluation system, the LLM-KG-Bench. The evaluation encompassed four commercially available LLMs - GPT-3.5, GPT-4, Claude 1.3, and Claude 2.0, as well as two freely accessible offline models, GPT4All Vicuna and GPT4All Falcon 13B. This analysis offers an in-depth understanding of the strengths and shortcomings of LLMs in relation to their application within RDF knowledge graph engineering workflows utilizing Turtle representation. While our findings show that the latest commercial models outperform their forerunners in terms of proficiency with the Turtle language, they also reveal an apparent weakness. These models fall short when it comes to adhering strictly to the output formatting constraints, a crucial requirement in this context.},
   comment = {Code: https://github.com/AKSW/LLM-KG-Bench
 Results: https://github.com/AKSW/LLM-KG-Bench-Results/tree/main/2023-DL4KG_Turtle-KG-Eval},
   doi = {10.48550/ARXIV.2309.17122},
+  issn = {1613-0073},
   keywords = {group_aksw sys:relevantFor:infai es frey lpmeyer arndt},
   url = {https://ceur-ws.org/Vol-3559/paper-3.pdf},
 }
@@ -12433,23 +12440,35 @@ @InProceedings{Meyer2023DevelopingScalableBenchmark
   booktitle = {Proceedings of Poster Track of Semantics 2023},
   title = {Developing a Scalable Benchmark for Assessing Large Language Models in Knowledge Graph Engineering},
   year = {2023},
+  pages = {16--20},
+  series = {CEUR Workshop Proceedings},
+  volume = {3526},
   abstract = {As the field of Large Language Models (LLMs) evolves at an accelerated pace, the critical need to assess and monitor their performance emerges. We introduce a benchmarking framework focused on knowledge graph engineering (KGE) accompanied by three challenges addressing syntax and error correction, facts extraction and dataset generation. We show that while being a useful tool, LLMs are yet unfit to assist in knowledge graph generation with zero-shot prompting. Consequently, our LLM-KG-Bench framework provides automatic evaluation and storage of LLM responses as well as statistical data and visualization tools to support tracking of prompt engineering and model performance.},
   comment = {Code: https://github.com/AKSW/LLM-KG-Bench
 Results: https://github.com/AKSW/LLM-KG-Bench-Results/blob/main/2023-SEMANTICS_LLM-KGE-Bench-Results},
   doi = {10.48550/ARXIV.2308.16622},
+  issn = {1613-0073},
   keywords = {group_aksw sys:relevantFor:infai es lpmeyer frey junghanns martin},
+  url = {https://ceur-ws.org/Vol-3526/paper-04.pdf},
 }

-@Article{Meyer2023LLMassistedKnowledge,
-  author = {Meyer, Lars-Peter and Stadler, Claus and Frey, Johannes and Radtke, Norman and Junghanns, Kurt and Meissner, Roy and Dziwis, Gordian and Bulert, Kirill and Martin, Michael},
-  title = {LLM-assisted Knowledge Graph Engineering: Experiments with ChatGPT},
-  year = {2023},
-  abstract = {Knowledge Graphs (KG) provide us with a structured, flexible, transparent, cross-system, and collaborative way of organizing our knowledge and data across various domains in society and industrial as well as scientific disciplines. KGs surpass any other form of representation in terms of effectiveness. However, Knowledge Graph Engineering (KGE) requires in-depth experiences of graph structures, web technologies, existing models and vocabularies, rule sets, logic, as well as best practices. It also demands a significant amount of work. Considering the advancements in large language models (LLMs) and their interfaces and applications in recent years, we have conducted comprehensive experiments with ChatGPT to explore its potential in supporting KGE. In this paper, we present a selection of these experiments and their results to demonstrate how ChatGPT can assist us in the development and management of KGs.},
-  comment = {to appear in proceedings of AI Tomorrow 2023
-
-Results: https://github.com/AKSW/AI-Tomorrow-2023-KG-ChatGPT-Experiments},
-  doi = {10.48550/ARXIV.2307.06917},
-  keywords = {group_aksw sys:relevantFor:infai es lpmeyer stadler frey radtke junghanns meissner martin},
+@InProceedings{Meyer2023LLMassistedKnowledge,
+  author = {Meyer, Lars-Peter and Stadler, Claus and Frey, Johannes and Radtke, Norman and Junghanns, Kurt and Meissner, Roy and Dziwis, Gordian and Bulert, Kirill and Martin, Michael},
+  booktitle = {First Working Conference on Artificial Intelligence Development for a Resilient and Sustainable Tomorrow (AITomorrow) 2023},
+  title = {LLM-assisted Knowledge Graph Engineering: Experiments with ChatGPT},
+  year = {2024},
+  address = {Wiesbaden},
+  editor = {Christian Zinke-Wehlmann and Julia Friedrich},
+  month = apr,
+  pages = {103-115},
+  publisher = {Springer Fachmedien Wiesbaden},
+  series = {Informatik aktuell},
+  abstract = {Knowledge Graphs (KG) provide us with a structured, flexible, transparent, cross-system, and collaborative way of organizing our knowledge and data across various domains in society and industrial as well as scientific disciplines. KGs surpass any other form of representation in terms of effectiveness. However, Knowledge Graph Engineering (KGE) requires in-depth experiences of graph structures, web technologies, existing models and vocabularies, rule sets, logic, as well as best practices. It also demands a significant amount of work. Considering the advancements in large language models (LLMs) and their interfaces and applications in recent years, we have conducted comprehensive experiments with ChatGPT to explore its potential in supporting KGE. In this paper, we present a selection of these experiments and their results to demonstrate how ChatGPT can assist us in the development and management of KGs.},
+  comment = {Results: https://github.com/AKSW/AI-Tomorrow-2023-KG-ChatGPT-Experiments},
+  doi = {10.1007/978-3-658-43705-3_8},
+  isbn = {978-3-658-43705-3},
+  keywords = {group_aksw sys:relevantFor:infai es lpmeyer stadler frey radtke junghanns meissner martin},
+  url = {https://link.springer.com/chapter/10.1007/978-3-658-43705-3_8},
 }

 @InProceedings{bike2023carmo,
@@ -12512,10 +12531,11 @@ @InProceedings{Brei2024Leveragingsmalllanguage
   title = {Leveraging small language models for Text2SPARQL tasks to improve the resilience of AI assistance},
   year = {2024},
   editor = {Julia Holze and Sebastian Tramp and Michael Martin and Sören Auer and Ricardo Usbeck and Nenad Krdzavac},
-  series = {CEUR-WS},
+  series = {CEUR Workshop Proceedings},
   volume = {3707},
   abstract = {In this work we will show that language models with less than one billion parameters can be used to translate natural language to SPARQL queries after fine-tuning. Using three different datasets ranging from academic to real world, we identify prerequisites that the training data must fulfill in order for the training to be successful. The goal is to empower users of semantic web technology to use AI assistance with affordable commodity hardware, making them more resilient against external factors.},
   doi = {10.48550/arXiv.2405.17076},
+  issn = {1613-0073},
   keywords = {group_aksw sys:relevantFor:infai es frey lpmeyer},
   url = {https://ceur-ws.org/Vol-3707/D2R224_paper_5.pdf},
 }
@@ -12548,9 +12568,10 @@ @InCollection{Meyer2024AssessingSparqlCapabilititesLLM
   title = {Assessing SPARQL capabilities of Large Language Models},
   year = {2024},
   abstract = {The integration of Large Language Models (LLMs) with Knowledge Graphs (KGs) offers significant synergistic potential for knowledge-driven applications. One possible integration is the interpretation and generation of formal languages, such as those used in the Semantic Web, with SPARQL being a core technology for accessing KGs. In this paper, we focus on measuring out-of-the-box capabilities of LLMs to work with SPARQL and more specifically with SPARQL SELECT queries applying a quantitative approach. We implemented various benchmarking tasks in the LLM-KG-Bench framework for automated execution and evaluation with several LLMs. The tasks assess capabilities along the dimensions of syntax, semantic read, semantic create, and the role of knowledge graph prompt inclusion. With these new benchmarking tasks, we evaluated a selection of GPT, Gemini, and Claude models. Our findings indicate that working with SPARQL SELECT queries is still challenging for LLMs and heavily depends on the specific LLM as well as the complexity of the task. While fixing basic syntax errors seems to pose no problems for the best of the current LLMs evaluated, creating semantically correct SPARQL SELECT queries is difficult in several cases.},
-  comment = {to appear in Proceedings of Workshop NLP4KGC @SEMANTICS 2024},
+  comment = {to appear in Proceedings of Workshop NLP4KGC @SEMANTICS 2024 at https://ceur-ws.org},
   doi = {10.48550/ARXIV.2409.05925},
   keywords = {group_aksw sys:relevantFor:infai es lpmeyer brei frey arndt},
+  url = {https://arxiv.org/pdf/2409.05925},
 }

 @Comment{jabref-meta: databaseType:bibtex;}
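
A pattern runs through these hunks: every entry published with CEUR-WS now carries series = {CEUR Workshop Proceedings}, the CEUR ISSN 1613-0073, a volume number, and a direct url. As a purely illustrative sketch (not part of this commit), the Python bibtexparser library (v1 API) could be used to check that the merged aksw.bib keeps this invariant:

import bibtexparser

# Parse the merged bibliography; db.entries is a list of dicts
# with lowercased field names plus the entry key under "ID".
with open("aksw.bib") as f:
    db = bibtexparser.load(f)

# Every CEUR Workshop Proceedings entry should now carry the
# series ISSN, a volume number, and a direct url.
for entry in db.entries:
    if entry.get("series") == "CEUR Workshop Proceedings":
        missing = [field for field in ("issn", "volume", "url")
                   if field not in entry]
        if missing:
            print(f"{entry['ID']} is missing: {', '.join(missing)}")

After this merge the check should report nothing for the entries touched here; Meyer2024AssessingSparqlCapabilititesLLM is untouched by it, since its proceedings volume has not appeared yet.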
