<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Integrating I4.0 Knowledge Graphs with Large Language Models Beyond SPARQL Endpoints</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Abdul</forename><surname>Wahid</surname></persName>
							<email>a.wahid2@universityofgalway.ie</email>
							<affiliation key="aff0">
								<orgName type="department">Data Science Institute</orgName>
								<orgName type="institution">University of Galway</orgName>
								<address>
									<addrLine>IDA Business Park, Lower Dangan Galway</addrLine>
									<postCode>H91 AEX4</postCode>
									<country key="IE">Ireland</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">AEX4</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Muhammad</forename><surname>Yahya</surname></persName>
							<email>muhammad.yahya@valeo.com</email>
						</author>
						<author>
							<persName><forename type="first">Farooq</forename><surname>Zaman</surname></persName>
							<affiliation key="aff2">
								<orgName type="institution">Valeo Vision Systems</orgName>
								<address>
									<settlement>Tuam, Galway</settlement>
									<country key="IE">Ireland</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="institution">Information Technology University</orgName>
								<address>
									<settlement>Lahore</settlement>
									<country key="PK">Pakistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Baifan</forename><surname>Zhou</surname></persName>
							<email>baifan.zhou@oslomet.no</email>
							<affiliation key="aff4">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">Oslo Metropolitan University</orgName>
								<address>
									<settlement>Oslo</settlement>
									<country key="NO">Norway</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">John</forename><forename type="middle">G</forename><surname>Breslin</surname></persName>
							<email>john.breslin@universityofgalway.ie</email>
							<affiliation key="aff5">
								<orgName type="department">Data Science Institute</orgName>
								<orgName type="institution">University of Galway</orgName>
								<address>
									<addrLine>IDA Business Park, Lower Dangan Galway</addrLine>
									<postCode>H91 AEX4</postCode>
									<country key="IE">Ireland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Muhammad</forename><forename type="middle">Ali</forename><surname>Intizar</surname></persName>
							<affiliation key="aff6">
								<orgName type="department">School of Electronic Engineering</orgName>
								<orgName type="institution">Dublin City University</orgName>
								<address>
									<settlement>Dublin</settlement>
									<country key="IE">Ireland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Evgeny</forename><surname>Kharlamov</surname></persName>
							<email>evgeny.kharlamov@de.bosch.com</email>
							<affiliation key="aff7">
								<orgName type="institution">Bosch Center for AI</orgName>
								<address>
									<settlement>Renningen</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff8">
								<orgName type="laboratory" key="lab1">SemIIM2024</orgName>
								<orgName type="laboratory" key="lab2">Third International Workshop on Semantic Industrial Information Modelling</orgName>
								<address>
									<addrLine>Nov 11, 2024 - Nov 15</addrLine>
									<postCode>2024</postCode>
									<settlement>Baltimore</settlement>
									<country key="US">USA</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Integrating I4.0 Knowledge Graphs with Large Language Models Beyond SPARQL Endpoints</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">8D188ECE7A7ED02A1896FBE0CDF7F6E8</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T16:47+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Knowledge Graphs, LLaMa, SPARQL, Large Language Model (LLM), Deep Learning Orcid 0000-0003-2625-276X (A. Wahid)</term>
					<term>0000-0002-5766-5172 (M. Yahya)</term>
					<term>0000-0003-3698-0541 (B. Zhou)</term>
					<term>0000-0001-5790-050X (J. G. Breslin)</term>
					<term>0000-0002-0674-2131 (M. A. Intizar)</term>
					<term>0000-0003-3247-4166 (E. Kharlamov)</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Industry 4.0 (I4.0) knowledge graphs are a common way to represent industrial information models. Conventional SPARQL querying systems require the users to be familiar with the data schema and SPARQL syntax. However, this is often very difficult for many users in industrial production, who have mostly an engineering background, instead of a semantic web background. Recent developments in large language models (LLMs) make it possible for non-semantic experts to use natural language to query knowledge graphs (KG). In this work, we present a framework and preliminary results of integrating Industry 4.0 KGs with LLMs to improve how data is represented, reasoned, and processed in manufacturing contexts, facilitating user interaction with KGs and contributing to operational efficiency. Our technique enhances Language Models (LLMs) by utilising the semantic complexity and interdependence of Knowledge Graphs (KGs). This allows us to incorporate domain-specific knowledge. We used the FAISS library and LLaMA2 to optimise the storage and retrieval of vectors, which improved the system's performance and scalability. This integration allows for advanced fault detection, proactive maintenance, and process optimisation, resulting in decreased periods of inactivity and improved productivity. We introduce the framework's architecture, implementation strategy, and possible advantages while also discussing the difficulties associated with data integration and scalability. The results of our study show that the integration of KG-LLM surpasses traditional approaches in terms of operational efficiency, as evidenced by enhanced fault detection, proactive maintenance, and process optimisation, thereby opening up possibilities for the advancement of more intelligent and resilient production systems.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>In Industry 4.0 (I4.0), digital twins of industrial assets are built thanks to the massive collection and representation of industrial data and knowledge. Industrial information models are a widely applied type of digital twin, and their adoption continues to draw increasing attention. Knowledge Graphs (KG) are a common way to represent industrial information models <ref type="bibr" target="#b0">[1]</ref>. A Knowledge Graph is a data structure designed to gather and share information about the real world. The nodes in the network represent entities of interest, while the edges represent various relationships between these entities <ref type="bibr" target="#b1">[2]</ref>. It semantically models the data in a structured way and extracts knowledge using deductive and inductive techniques <ref type="bibr" target="#b1">[2]</ref>. In recent years, the use of Knowledge Graphs has become essential in various industrial companies, including Bosch <ref type="bibr" target="#b2">[3]</ref>, Siemens <ref type="bibr" target="#b3">[4]</ref>, Airbus <ref type="bibr" target="#b4">[5]</ref>, and others. KGs address the issue of diverse data by unifying the data sources into a cohesive structure, enabling streamlined data retrieval through a SPARQL endpoint <ref type="bibr" target="#b12">[13]</ref>. This integration reduces the effort required for data access and enhances data connectivity and cohesiveness across the enterprise. Figure <ref type="figure" target="#fig_0">1</ref> illustrates the architecture of the enterprise system that utilises the KGs for integrated data retrieval.</p><p>To demonstrate our proposed framework's practical application and effectiveness, we utilise a specific use case: a Football Production Line. 
This production line consists of multiple machines responsible for various stages of football manufacturing, including material preparation, stitching, and quality control, as shown in Figure <ref type="figure" target="#fig_0">1</ref>. Each machine generates vast amounts of data related to processes, tools, sensor readings, and operational parameters. This data is essential for real-time monitoring, fault detection, and process optimisation, but the complexity of querying such diverse information through traditional methods like SPARQL presents a significant barrier for non-expert users. Therefore, by integrating a Knowledge Graph (KG) with large language models (LLMs), we enable natural language querying, making the data more accessible to users without semantic web expertise. Figure <ref type="figure" target="#fig_0">1</ref> illustrates the architecture of the generalised framework we propose, which can be applied across various industrial settings, with the Football Production Line serving as our use case to validate the system.</p><p>However, to maximise the utility of these SPARQL endpoints, it is crucial to provide comprehensive training for manufacturing line end users, including engineers, supervisors, operators, and other personnel, who are normally non-semantic experts and are not familiar with technologies such as ontology and SPARQL. This lack of expertise and excessive training costs impede the adoption and development of industrial information models represented in KGs. The latest progress in Large Language Models (LLMs) provides an opportunity to support users in visually examining and analysing KGs. LLMs like BERT, RoBERTa, and T5, pre-trained on extensive corpora, excel in various NLP tasks such as question answering, machine translation, and text generation <ref type="bibr" target="#b7">[8]</ref>. 
Advanced LLMs like ChatGPT and PaLM2, with billions of parameters, show great promise in complex tasks such as education, code generation, and recommendation <ref type="bibr" target="#b8">[9]</ref>. Similarly, the end users in the manufacturing line of I4.0 can leverage the KGs by integrating them with LLMs where they can fetch the information using natural language.</p><p>In this paper, we propose an approach that utilises LLMs for user interaction with industrial KGs, reducing the training time, cost, and expertise barrier that I4.0 users may confront within the conventional SPARQL querying approach. The paper provides a literature review (Section 2), presents the proposed architecture (Section 3), and provides experimental analysis (Section 4). Finally, Section 5 concludes the paper and suggests possible future work.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Literature Review</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.">Knowledge Graphs in Manufacturing</head><p>In recent years, Knowledge Graphs (KGs) have played a crucial role in creating digital twins (DT) of physical systems for Industry 4.0, to improve the management of manufacturing processes <ref type="bibr" target="#b9">[10]</ref>, <ref type="bibr" target="#b10">[11]</ref>. Furthermore, the implementation of I4.0 has played a crucial role in resolving concerns regarding interoperability and data integration, as stated in <ref type="bibr" target="#b11">[12]</ref>, <ref type="bibr" target="#b13">[14]</ref>. The authors presented the Bosch Industry 4.0 Knowledge Graph (BI40KG), which is a methodology that uses ontologies to integrate data sources in a Knowledge Graph. This methodology enhances interoperability and traceability within Industry 4.0 environments <ref type="bibr" target="#b14">[15]</ref>. A separate investigation examined the utilisation of Knowledge Graph embeddings (KGE) in integrating data for monitoring the quality of automobile welding. This study emphasised the difficulties and potential benefits associated with this approach <ref type="bibr" target="#b15">[16]</ref>. This study addressed the complex challenge of combining data to determine the diameter of welding spots and identify car bodies. Nevertheless, there is a notable obstacle in enabling end-users, including engineers, device operators, and supervisors, to efficiently utilise these Knowledge Graphs. The current method requires intensive training sessions and workshops to educate users on how to use SPARQL endpoints. It is essential to undergo this training because SPARQL endpoints do not provide sufficient assistance for natural language searches. As a result, users must possess a thorough grasp of query formulation to obtain relevant results.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">Integration of KGs and LLMs</head><p>A collaborative training-free reasoning schema that combines KGs and LLMs to address challenges faced by LLMs in practical applications such as hallucinations, knowledge updating issues, and limited transparency in reasoning was proposed in <ref type="bibr" target="#b16">[17]</ref>. The technique involves LLMs systematically exploring the KG to retrieve knowledge subgraphs that are relevant to the task. This helps guide the LLMs in combining implicit knowledge for reasoning on the subgraph. In <ref type="bibr" target="#b17">[18]</ref> a combination of qualitative and quantitative research methodologies was employed to investigate how LLMs might aid users in the exploration and analysis of Knowledge Graphs (KGs). Specifically, it examines the use of collaborative query formulation, multi-turn discussion for discovering relationships, and the creation of on-demand visualisations. The concept of utilising universal LLMs to create KGs for extracting knowledge from complicated texts was introduced in <ref type="bibr" target="#b18">[19]</ref>. This was followed by the process of updating domain-specific LLMs through knowledge editing, resulting in high levels of accuracy in question-answer tasks across several domains. In <ref type="bibr" target="#b19">[20]</ref>, LLMs were assessed for their ability to create and reason with KGs. This evaluation involved eight datasets that encompassed tasks such as entity, relation, and event extraction, link prediction, and question answering. A recent study examines the use of ChatGPT in performing experiments to investigate its capabilities in assisting Knowledge Graph Engineering (KGE) <ref type="bibr" target="#b20">[21]</ref>. 
The paper presents findings that indicate how ChatGPT can aid in the creation and administration of KGs.</p><p>The literature indicates that LLMs are being integrated with KGs to improve their construction and logical reasoning <ref type="bibr" target="#b19">[20]</ref>, <ref type="bibr" target="#b21">[22]</ref>, <ref type="bibr" target="#b22">[23]</ref>, <ref type="bibr" target="#b23">[24]</ref>. Notably, models like GPT-4 outperform ChatGPT in many tasks and outperform models optimised for certain reasoning and question-answering datasets <ref type="bibr" target="#b19">[20]</ref>. This shows that LLMs can now extract and reason about knowledge in KGs, making them useful in complicated information systems.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Our Approach</head><p>The primary goal of this work is to integrate Industry 4.0 Knowledge Graphs (KGs) with Large Language Models (LLMs) to enable natural language querying of industrial data. This approach allows non-semantic experts such as engineers and operators to interact with the KG using simple natural language, without needing specialized knowledge of SPARQL or ontology. The expected outcome is to increase the accessibility of industrial data, leading to improved operational efficiency, fault detection, proactive maintenance, and process optimisation. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.">Football Production Line Dataset</head><p>The football production process involves nine machines, each generating data related to tools, critical parameters, and sensor outputs. This data is gathered and analysed using Reference Generic Ontology Model (RGOM) classes and relations, and subsequently mapped to ontology terms via the Jena API <ref type="foot" target="#foot_0">3</ref> , resulting in an RDF triple store referred to as the Industry 4.0 Knowledge Graph (I4.0 KG) <ref type="bibr" target="#b24">[25]</ref>. On average, each football production cycle generates 1730 triples. With 36 footballs produced per hour, this results in approximately 22,150 triples generated hourly from 2,903 distinct individuals (entities), which represent machines, tools, sensors, and other relevant components of the production line. These triples are stored in CSV format to facilitate integration with the Large Language Model (LLM).</p><p>The RGOM serves as a set of predefined classes that categorise and organise knowledge related to the football production process. These classes represent different entities involved in production, such as machines, tools, and processes, along with their interrelations. By mapping production data to RGOM classes, we create a structured Knowledge Graph that can be queried and reasoned upon. The 1730 triples generated per football production cycle represent real-time data on various aspects of the production line, including sensor readings, machine status, tool usage, and process parameters. These triples are dynamic and continuously updated as the production process evolves. The average of 1730 triples is tied to each football's production cycle, reflecting real-time updates in the Knowledge Graph.</p><p>With 36 footballs produced each hour, the system generates approximately 22,150 triples, collected from 2,903 distinct individuals (entities such as machines, tools, and sensors). 
The Knowledge Graph grows dynamically with each production cycle, adding new data. Our system architecture ensures that the data is scalable, using FAISS for vector-based retrieval to efficiently handle large volumes of information and provide quick access to relevant data even as the graph grows continuously.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2.">Overview of the Proposed Framework</head><p>A Knowledge Graph (KG) can be represented as a labelled directed graph 𝐺 = (𝑉 𝑒 , 𝐸, 𝑇 ), where 𝑉 𝑒 and 𝐸 are sets of nodes and labels representing entities and relations, respectively, and 𝑇 represents the triples. Each triple (𝑠, 𝑟, 𝑜) consists of a subject 𝑠, a relation 𝑟, and an object 𝑜. To enhance the usability and efficiency of the KG, we generate embeddings for its components and store them in a vector space.</p><p>The embeddings are generated using a sentence transformer, an efficient embedding model for converting text to embeddings <ref type="bibr" target="#b25">[26]</ref>. By leveraging the sentence transformer, we can transform the KG into a vector store, where embeddings for each triple are computed and stored. Specifically, for each triple (𝑠, 𝑟, 𝑜), we compute embeddings for the subject 𝑠, the relation 𝑟, and the object 𝑜. These embeddings are then stored in the FAISS<ref type="foot" target="#foot_1">2</ref> vector store, enabling efficient vector-based retrieval.</p><p>To facilitate the retrieval of relevant triples from the KG, we define a retrieval function that calculates the cosine similarity between a query embedding and the pre-computed embeddings of the triples stored in the vector store by building a data structure in RAM from a given set of vectors 𝑥 1 , ..., 𝑥 𝑛 in dimension 𝑑. The cosine similarity function is computed as shown in Equation 1. When a query is issued, the function computes the cosine similarity scores between the query embedding and all stored embeddings, retrieving the triples with the highest similarity scores. This approach allows for efficient and accurate query matching within the Knowledge Graph, leveraging the power of vector space representations and similarity search to enhance the usability of the KG for various applications.</p><formula xml:id="formula_0">𝑖 = 𝑎𝑟𝑔𝑚𝑖𝑛 𝑖 ∥ 𝑥 − 𝑥 𝑖 ∥<label>(1)</label></formula><p>Where ∥ . 
∥ is the Euclidean distance (L2) and the dimension of 𝑥 𝑖 needs to be fixed.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3.">Integration with LLM</head><p>FAISS converted our knowledge network into a vector store, and Meta AI's LLaMA2 enhanced triple explanations based on user questions. This approach required multiple phases to make information relevant and clear. First, we created and refined an LLaMA2 prompt. This prompt taught the language model to explain retrieved triples clearly and contextually. We made the prompt's explanations relevant to the user's informational needs by providing contextual information from the query. We then customised and fine-tuned LLaMA2 to comprehend our Knowledge Graph's structure and semantics.</p><p>Training the model with triples and their explanations enhanced its accuracy and relevance. A user query is processed to extract relevant embeddings, and the retrieval function finds triples with the highest cosine similarity scores. After receiving these triples, fine-tuned LLaMA2 provides thorough explanations. These explanations make triple connections and entities easier to understand. Our Knowledge Graph retrieval procedure is much more usable with LLaMA2 because it simplifies complex data. The Knowledge Graph is useful for research, education, and data-driven decision-making since the model delivers context-aware responses to user queries.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.4.">Proposed Methodology</head><p>Our strategy focused on prioritising prompt engineering rather than fine-tuning or training the LLM from scratch. Prompt engineering refers to the process of creating precise prompts to direct the LLM in producing desired results, without making changes to the model's underlying parameters or retraining it. This approach is more effective and flexible, enabling us to utilise the advantages of existing LLMs without requiring significant computational resources.</p><p>Our tests utilised an enhanced iteration of LLaMA2, developed and compiled in C++, with a specific focus on effortless integration with Python. This optimisation ensured that the model could operate with greater efficiency and effectiveness within our current Python-based infrastructure. Through prioritising prompt engineering, we successfully utilised the capabilities of LLaMA2 to produce concise and contextually appropriate explanations for the triples obtained from our KG. This improved the user experience significantly without requiring considerable retraining of the model.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Results and Discussions</head><p>Instead of relying on traditional SPARQL queries, our approach leverages natural language prompts with the help of LLaMA2. This enables end users to interact with the Knowledge Graph in plain language, significantly enhancing accessibility and ease of use. For example, as shown in Figure <ref type="figure" target="#fig_2">3</ref>, when queried about the temperature of 𝑀𝑎𝑐ℎ𝑖𝑛𝑒 6 from "12:06 12:30:31 to 06-12 12:47:57," the system analyzed the query and retrieved the relevant triples from the Knowledge Graph. The results indicated that no observations of Machine_6's temperature were found during the specified period. Listing 2 shows the SPARQL query that typically returns this result. Using natural language queries removes the need for SPARQL understanding or logical reasoning, making it easier for non-experts to access and query the Knowledge Graph without requiring specialized query language knowledge. ?tool sosa:madeObservation ?observation. ?observation sosa:hasSimpleResult ?result.</p><p>?time tm:hasStartTime ?Start_time. } Similarly, as shown in Figure <ref type="figure" target="#fig_4">4</ref>, when queried about the status of the 𝑚𝑎𝑐ℎ𝑖𝑛𝑒 2 motor from "2021-06-01T 10:11:00Z to 2021-06-01T 10:12:55Z", the system determined that the motor was continuously working throughout the specified time range, with no changes in status. The retrieved triples, such as /Machine2_Motor_State_2021 hasState working, confirmed the motor's operational state. Like the previous example, Listing 3 shows that natural language querying is intuitive and easy, allowing nondomain specialists to retrieve the necessary information from the Knowledge Graph without needing to understand SPARQL or other complex query languages.</p><p>Our proposed methodology leverages the advanced capabilities of LLaMA2 to interpret these natural language queries. 
Through prompt engineering, we customised the model's responses to generate detailed and contextually relevant explanations for each retrieved triple. This approach allows us to achieve the desired output without performing additional model training or fine-tuning. When user queries are turned into embeddings, the retrieval function finds the cosine similarity scores between the query embedding and the triples' embeddings that have already been computed and stored in the vector space. As a result, our method makes Knowledge Graphs easier to use by letting people interact with them using natural language and supporting it with the advanced interpretation skills of LLMs. This lets people who do not know SPARQL before have a smooth and effective querying experience.</p><p>The examples in this work utilise exact timestamps (e.g., "12:06:12 to 12:47:57") because the Knowledge Graph stores precise sensor data at specific times. However, we recognise that typical users may input broader, more natural time ranges (e.g., "12:00 to 12:30") when querying the system. Currently, the system requires an exact match with the stored timestamps to retrieve the relevant triples. This reliance on precision can pose a limitation, as queries that do not exactly match the stored times (e.g., "12:00" instead of "12:06:12") may fail to return results. To overcome this limitation, we propose enhancing the system with a time-range estimation mechanism. This enhancement would allow the system to interpret user-specified time ranges and map them to the closest matching timestamps in the Knowledge Graph. For example, a query for "12:00 to 12:30" would approximate and retrieve the relevant data, even if the exact timestamps stored in the Knowledge Graph are "12:06:12 to 12:47:57. " This improvement would significantly increase the flexibility and user-friendliness of the querying process, enabling users to ask more natural and practical questions. ?motor smo:hasName ?Motor_Name. 
?motor smo:hasMotorState ?state. ?process tm:hasTime ?time. ?state smo:hasState ?Status. ?time tm:hasStartTime ?Start_time. FILTER (?Start_time &gt; "2021-06-01T 10:11:00Z"^^xsd:dateTime &amp;&amp; ?Start_time &lt; "2021-06-01T 10:12:55Z"^^xsd:dateTime).}</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Conclusion and Future Work</head><p>This study investigates the application of LLMs to improve the usability and interpretability of KGs. Through the utilisation of a FAISS-based sentence-transformer to turn our KG into a vector store and the integration of a Python-adapted C++-compiled version of LLaMA2 from Meta AI, we have successfully showcased the ability of prompt engineering to produce simple and contextually appropriate explanations for retrieved triples in response to user enquiries. This method, which bypasses the resource-intensive process of comprehensive model training, greatly enhances the accessibility and understanding of data. Our future research will prioritise advanced prompt engineering, enhancing the KG by including more data sources, optimising system efficiency, integrating user feedback, and investigating cross-domain ontology mapping to develop a more resilient and adaptable system.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Overview of the enterprise system employing a Knowledge Graph for unified data retrieval.</figDesc><graphic coords="2,93.97,446.80,407.33,85.08" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Integration of I4.0 KG with a large language model (LLM). The Knowledge Graph facilitates entity and relation extraction, producing the corresponding embeddings. These embeddings, representing subjects, predicates, and objects, are processed through an adapter module. The adapter integrates the structured knowledge into the LLM, enhancing its natural language understanding and generative capabilities.</figDesc><graphic coords="4,123.75,207.13,347.77,157.51" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: The system's response to the query about the temperature of 𝑀𝑎𝑐ℎ𝑖𝑛𝑒_6. The output indicates no temperature observations within the specified period.</figDesc><graphic coords="6,72.00,65.61,451.26,184.17" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Listing 2 :</head><label>2</label><figDesc>SPARQL Query to retrieve the temperature results for 𝑚𝑎𝑐ℎ𝑖𝑛𝑒_6 for a specified time PREFIX smo: &lt;http://www.semanticweb.org/manufacturingproductionline/&gt; PREFIX d: &lt;http://www.semanticweb.org/manufacturingproductionline/data/&gt; PREFIX tm: &lt;http://www.w3.org/2006/time#&gt; PREFIX sosa: &lt;http://www.w3.org/ns/sosa#&gt; SELECT DISTINCT ?machine ?Start_time ?result WHERE { ?machine smo:hasTool ?tool.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: The system's response to the query about the status of the 𝑚𝑎𝑐ℎ𝑖𝑛𝑒_2𝑚𝑜𝑡𝑜𝑟.</figDesc><graphic coords="7,72.00,65.61,451.27,123.55" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Listing 3 :</head><label>3</label><figDesc>SPARQL Query to retrieve the motor status of 𝑚𝑎𝑐ℎ𝑖𝑛𝑒_2 PREFIX smo: &lt;http://www.semanticweb.org/manufacturingproductionline/&gt; PREFIX d: &lt;http://www.semanticweb.org/manufacturingproductionline/data/&gt; PREFIX tm: &lt;http://www.w3.org/2006/time#&gt; SELECT DISTINCT ?Motor_Name ?Status ?Start_time WHERE { d:Machine_1 smo:hasTool ?motor.</figDesc></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_0">https://jena.apache.org</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_1">https://ai.meta.com/tools/faiss/</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>This publication has emanated from research conducted with the financial support of Science Foundation Ireland under Grant Number SFI/12/RC/2289_P2 (Insight). For Open Access, the author has applied a CC BY public copyright licence to any Author Accepted Manuscript version arising from this submission.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Link Prediction in Industrial Knowledge Graphs: A Case Study on Football Manufacturing</title>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Yahya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Abdul</forename><surname>Wahid</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lan</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">John</forename><forename type="middle">G</forename><surname>Breslin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Evgeny</forename><surname>Kharlamov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Intizar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ali</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Access</title>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Knowledge graphs</title>
		<author>
			<persName><forename type="first">Aidan</forename><surname>Hogan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Eva</forename><surname>Blomqvist</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Michael</forename><surname>Cochez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Claudia</forename><surname>D'Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Gerard</forename><surname>De Melo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Claudio</forename><surname>Gutierrez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sabrina</forename><surname>Kirrane</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ACM Computing Surveys (Csur)</title>
		<imprint>
			<biblScope unit="volume">54</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page" from="1" to="37" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Neuro-Symbolic AI at Bosch: Data Foundation, Insights, and Deployment</title>
		<author>
			<persName><forename type="first">Baifan</forename><surname>Zhou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Zhipeng</forename><surname>Tan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Zhuoxun</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Dongzhuoran</forename><surname>Zhou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yunjie</forename><surname>He</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yuqicheng</forename><surname>Zhu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Yahya</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ISWC (Posters/Demos/Industry)</title>
				<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Exploring enterprise knowledge graphs: A use case in software engineering</title>
		<author>
			<persName><forename type="first">Marta</forename><surname>Sabou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Fajar</forename><forename type="middle">J</forename><surname>Ekaputra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Tudor</forename><surname>Ionescu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Juergen</forename><surname>Musil</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Daniel</forename><surname>Schall</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Kevin</forename><surname>Haller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Armin</forename><surname>Friedl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Stefan</forename><surname>Biffl</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">The Semantic Web: 15th International Conference, ESWC 2018</title>
				<meeting><address><addrLine>Heraklion, Crete, Greece</addrLine></address></meeting>
		<imprint>
			<publisher>Springer International Publishing</publisher>
			<date type="published" when="2018">June 3-7, 2018. 2018</date>
			<biblScope unit="page" from="560" to="575" />
		</imprint>
	</monogr>
	<note>Proceedings 15</note>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Applying Semantic Web Technologies to Assess Maintenance Tasks from Operational Interruptions: A Use-Case at Airbus</title>
		<author>
			<persName><forename type="first">Ghislain</forename><surname>Atemezing</surname></persName>
		</author>
		<author>
			<persName><surname>Auguste</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">The Semantic Web: 14th International Conference, ESWC 2017</title>
				<meeting><address><addrLine>Portorož, Slovenia</addrLine></address></meeting>
		<imprint>
			<publisher>Springer International Publishing</publisher>
			<date type="published" when="2017-06-01">May 28-June 1, 2017. 2017</date>
			<biblScope unit="page" from="3" to="17" />
		</imprint>
	</monogr>
	<note>Proceedings, Part II 14</note>
</biblStruct>

<biblStruct xml:id="b5">
	<monogr>
		<title level="m" type="main">Towards Semantic Modeling of Camera from Image Quality Testing Perspective: Valeo Vision Systems Case</title>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Yahya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Aedan</forename><surname>Breathnach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Faisal</forename><surname>Khan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Iman</forename><surname>Abaspur</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Rajkumar</forename><surname>Ranganathan</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2023">2023</date>
			<publisher>SemIIM</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Converting property graphs to RDF: a preliminary study of the practical impact of different mappings</title>
		<author>
			<persName><forename type="first">Shahrzad</forename><surname>Khayatbashi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sebastián</forename><surname>Ferrada</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Olaf</forename><surname>Hartig</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 5th ACM SIGMOD Joint International Workshop on Graph Data Management Experiences &amp; Systems (GRADES) and Network Data Analytics (NDA)</title>
				<meeting>the 5th ACM SIGMOD Joint International Workshop on Graph Data Management Experiences &amp; Systems (GRADES) and Network Data Analytics (NDA)</meeting>
		<imprint>
			<date type="published" when="2022">2022</date>
			<biblScope unit="page" from="1" to="9" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Unifying large language models and knowledge graphs: A roadmap</title>
		<author>
			<persName><forename type="first">Shirui</forename><surname>Pan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Linhao</forename><surname>Luo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yufei</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Chen</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jiapu</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Xindong</forename><surname>Wu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Knowledge and Data Engineering</title>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">A survey on large language models: Applications, challenges, limitations, and practical usage</title>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Hadi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Rizwan</forename><surname>Usman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Abbas</forename><surname>Qureshi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Shah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Anas</forename><surname>Irfan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Zafar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Naveed</forename><surname>Bilal Shaikh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jia</forename><surname>Akhtar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Seyedali</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><surname>Mirjalili</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Authorea Preprints</title>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Generating digital twin models using knowledge graphs for industrial production lines</title>
		<author>
			<persName><forename type="first">Agniva</forename><surname>Banerjee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Raka</forename><surname>Dalal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sudip</forename><surname>Mittal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Karuna Pande</forename><surname>Joshi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop on Industrial Knowledge Graphs, co-located with the 9th International ACM Web Science Conference</title>
				<imprint>
			<date type="published" when="2017">2017. 2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Digital Twin Meets Knowledge Graph for Intelligent Manufacturing Processes</title>
		<author>
			<persName><forename type="first">Georgia</forename><surname>Stavropoulou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Konstantinos</forename><surname>Tsitseklis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lydia</forename><surname>Mavraidi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">-</forename><forename type="middle">I</forename><surname>Kuo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Anastasios</forename><surname>Chang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Vasileios</forename><surname>Zafeiropoulos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Symeon</forename><surname>Karyotis</surname></persName>
		</author>
		<author>
			<persName><surname>Papavassiliou</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">24</biblScope>
			<biblScope unit="issue">8</biblScope>
			<biblScope unit="page">2618</biblScope>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Semantic web and knowledge graphs for industry 4.0</title>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Yahya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">John</forename><forename type="middle">G</forename><surname>Breslin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Intizar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ali</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Applied Sciences</title>
		<imprint>
			<biblScope unit="volume">11</biblScope>
			<biblScope unit="issue">11</biblScope>
			<biblScope unit="page">5110</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">A survey of RDF stores &amp; SPARQL engines for querying knowledge graphs</title>
		<author>
			<persName><forename type="first">Waqas</forename><surname>Ali</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Saleem</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Bin</forename><surname>Yao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Aidan</forename><surname>Hogan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Axel-Cyrille Ngonga</forename><surname>Ngomo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">The VLDB Journal</title>
		<imprint>
			<biblScope unit="page" from="1" to="26" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Knowledge graph-based manufacturing process planning: A state-of-the-art review</title>
		<author>
			<persName><forename type="first">Youzi</forename><surname>Xiao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Shuai</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jiancheng</forename><surname>Shi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Xiaodong</forename><surname>Du</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jun</forename><surname>Hong</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Manufacturing Systems</title>
		<imprint>
			<biblScope unit="volume">70</biblScope>
			<biblScope unit="page" from="417" to="435" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Knowledge graphs for efficient integration and access of manufacturing data</title>
		<author>
			<persName><surname>Grangel-González</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Felix</forename><surname>Irlán</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Anees</forename><surname>Lösch</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Mehdi</forename><surname>Ul</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">25th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)</title>
				<imprint>
			<publisher>IEEE</publisher>
			<date type="published" when="2020">2020. 2020</date>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page" from="93" to="100" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Literal-aware knowledge graph embedding for welding quality monitoring: a bosch case</title>
		<author>
			<persName><forename type="first">Zhipeng</forename><surname>Tan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Baifan</forename><surname>Zhou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Zhuoxun</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ognjen</forename><surname>Savkovic</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ziqi</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Irlan-Grangel</forename><surname>Gonzalez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ahmet</forename><surname>Soylu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Evgeny</forename><surname>Kharlamov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">International Semantic Web Conference</title>
				<meeting><address><addrLine>Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer Nature Switzerland</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="453" to="471" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<monogr>
		<title level="m" type="main">An enhanced prompt-based LLM reasoning scheme via knowledge graph-integrated collaboration</title>
		<author>
			<persName><forename type="first">Yihao</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ru</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jianyi</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Gongshen</forename><surname>Liu</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2402.04978</idno>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b17">
	<monogr>
		<title level="m" type="main">A Preliminary Roadmap for LLMs as Assistants in Exploring, Analyzing, and Visualizing Knowledge Graphs</title>
		<author>
			<persName><forename type="first">Harry</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Gabriel</forename><surname>Appleby</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ashley</forename><surname>Suh</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2404.01425</idno>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b18">
	<monogr>
		<title level="m" type="main">LLMs Instruct LLMs: An Extraction and Editing Method</title>
		<author>
			<persName><forename type="first">Xin</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Tianjie</forename><surname>Ju</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Huijia</forename><surname>Liang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ying</forename><surname>Fu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Qin</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2403.15736</idno>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b19">
	<monogr>
		<title level="m" type="main">Llms for knowledge graph construction and reasoning: Recent capabilities and future opportunities</title>
		<author>
			<persName><forename type="first">Yuqi</forename><surname>Zhu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Xiaohan</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jing</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Shuofei</forename><surname>Qiao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yixin</forename><surname>Ou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yunzhi</forename><surname>Yao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Shumin</forename><surname>Deng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Huajun</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ningyu</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2305.13168</idno>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Llm-assisted knowledge graph engineering: Experiments with chatgpt</title>
		<author>
			<persName><forename type="first">Lars</forename><forename type="middle">-</forename><surname>Meyer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Claus</forename><surname>Peter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Johannes</forename><surname>Stadler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Norman</forename><surname>Frey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Kurt</forename><surname>Radtke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Roy</forename><surname>Junghanns</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Gordian</forename><surname>Meissner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Kirill</forename><surname>Dziwis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Michael</forename><surname>Bulert</surname></persName>
		</author>
		<author>
			<persName><surname>Martin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Working conference on Artificial Intelligence Development for a Resilient and Sustainable Tomorrow</title>
				<meeting><address><addrLine>Wiesbaden; Wiesbaden</addrLine></address></meeting>
		<imprint>
			<publisher>Springer Fachmedien</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="103" to="115" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<monogr>
		<title level="m" type="main">From human experts to machines: An LLM supported approach to ontology and knowledge graph construction</title>
		<author>
			<persName><forename type="first">Vamsi</forename><surname>Kommineni</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Birgitta</forename><surname>Krishna</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sheeba</forename><surname>König-Ries</surname></persName>
		</author>
		<author>
			<persName><surname>Samuel</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2403.08345</idno>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b22">
	<monogr>
		<title level="m" type="main">Knowledge Graph Large Language Model (KG-LLM) for Link Prediction</title>
		<author>
			<persName><forename type="first">Dong</forename><surname>Shu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Tianle</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Mingyu</forename><surname>Jin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yiting</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Mengnan</forename><surname>Du</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yongfeng</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2403.07311</idno>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b23">
	<monogr>
		<title level="m" type="main">Head-to-tail: How knowledgeable are large language models (llm)? AKA will llms replace knowledge graphs?</title>
		<author>
			<persName><forename type="first">Kai</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ethan</forename><surname>Yifan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Hanwen</forename><surname>Xu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yue</forename><surname>Zha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Xin</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Dong</forename><surname>Luna</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2308.10168</idno>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">A benchmark dataset with Knowledge Graph generation for Industry 4.0 production lines</title>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Yahya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Aabid</forename><surname>Ali</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Qaiser</forename><surname>Mehmood</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lan</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">John</forename><forename type="middle">G</forename><surname>Breslin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Muhammad</forename><surname>Intizar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ali</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Semantic Web</title>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="461" to="479" />
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<monogr>
		<title level="m" type="main">Sentence-bert: Sentence embeddings using siamese bert-networks</title>
		<author>
			<persName><forename type="first">Nils</forename><surname>Reimers</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Iryna</forename><surname>Gurevych</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1908.10084</idno>
		<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
