<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Building Personalised XAI Experiences Through iSee: a Case-Based Reasoning-Driven Platform</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Marta</forename><surname>Caro-Martínez</surname></persName>
							<email>martcaro@ucm.es</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Software Engineering and Artificial Intelligence</orgName>
								<orgName type="institution">Universidad Complutense de Madrid</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Anne</forename><surname>Liret</surname></persName>
							<email>anne.liret@bt.com</email>
							<affiliation key="aff3">
								<orgName type="institution">British Telecommunications</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Belén</forename><surname>Díaz-Agudo</surname></persName>
							<email>belend@ucm.es</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Software Engineering and Artificial Intelligence</orgName>
								<orgName type="institution">Universidad Complutense de Madrid</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Juan</forename><forename type="middle">A</forename><surname>Recio-García</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Software Engineering and Artificial Intelligence</orgName>
								<orgName type="institution">Universidad Complutense de Madrid</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Jesús</forename><surname>Darias</surname></persName>
							<email>jdarias@ucm.es</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Software Engineering and Artificial Intelligence</orgName>
								<orgName type="institution">Universidad Complutense de Madrid</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Nirmalie</forename><surname>Wiratunga</surname></persName>
							<email>n.wiratunga@rgu.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Anjana</forename><surname>Wijekoon</surname></persName>
							<email>a.wijekoon1@rgu.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Kyle</forename><surname>Martin</surname></persName>
							<email>k.martin3@rgu.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ikechukwu</forename><surname>Nkisi-Orji</surname></persName>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">David</forename><surname>Corsar</surname></persName>
							<email>d.corsar1@rgu.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Chamath</forename><surname>Palihawadana</surname></persName>
							<email>c.palihawadana@rgu.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Craig</forename><surname>Pirie</surname></persName>
							<email>c.pirie11@rgu.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">School of Computing</orgName>
								<orgName type="institution">Robert Gordon University</orgName>
								<address>
									<settlement>Aberdeen</settlement>
									<country key="GB">Scotland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Derek</forename><surname>Bridge</surname></persName>
							<email>d.bridge@cs.ucc.ie</email>
							<affiliation key="aff2">
								<orgName type="department">School of Computer Science &amp; IT</orgName>
								<orgName type="institution">University College Cork</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Preeja</forename><surname>Pradeep</surname></persName>
							<email>ppradeep@ucc.ie</email>
							<affiliation key="aff2">
								<orgName type="department">School of Computer Science &amp; IT</orgName>
								<orgName type="institution">University College Cork</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Bruno</forename><surname>Fleisch</surname></persName>
							<email>bruno.fleisch@bt.com</email>
							<affiliation key="aff3">
								<orgName type="institution">British Telecommunications</orgName>
							</affiliation>
						</author>
						<title level="a" type="main">Building Personalised XAI Experiences Through iSee: a Case-Based Reasoning-Driven Platform</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">283FF41AD579575279963FAC4D377190</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T19:37+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Case-Based Reasoning</term>
					<term>Personalised Explanation Experiences</term>
					<term>Explainer Library</term>
					<term>Evaluation Cockpit</term>
					<term>Explanation Experiences Editor</term>
					<term>XAI Chatbot</term>
					<term>XAI Ontology</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Nowadays, eXplainable Artificial Intelligence (XAI) is well-known as an important field in Computer Science due to the necessity of understanding the increasing complexity of Artificial Intelligence (AI) systems or algorithms. This is the reason why we can find a wide variety of explanation techniques (explainers) in the literature, on top of some XAI libraries. The challenge faced by XAI designers here is deciding what explainers are the most suitable for each scenario, taking into account the AI model, task to explain, user preferences, needs and knowledge, and overall, fitting into the explanation requirements. With the aim of addressing this problem, the iSee project was conceived to provide XAI design users with supporting tools to build their own explanation experiences. As a result, we have developed iSee, a Case-Based Reasoning-driven platform that allows users to create personalised explanation experiences. With the iSee platform, users add their explanation experience requirements, and get the most suitable XAI strategies to explain their own situation, taking advantage of XAI strategies previously used with success in similar context. The iSee platform is composed of different tools and modules: the ontology, the cockpit, the explainer library, the Explanation Experiences Editor (iSeeE3), the chatbot, and the analytics dashboard. This paper introduces these tools as a demo and tutorial for current and future users and for the XAI community.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Nowadays, Artificial Intelligence systems (AI) help us overcome many daily tasks, in critical and challenging domains, such as healthcare, manufacturing industry or security. Since these tasks have become crucial, the AI algorithms have also been developed to make them more accurate, which has led to the creation of more complex algorithms but, consequently algorithms that users find more difficult to understand <ref type="bibr" target="#b0">[1]</ref>. To overcome this problem, eXplainable Artificial Intelligence (XAI) systems have been developed, trying to meet the necessity to increase user trust in AI and enhance their utility <ref type="bibr" target="#b1">[2]</ref>. The research on XAI has grown by leaps and bounds due to high interest from the AI community and, as a result, we can find a wide variety of explainability techniques (explainers) that can be applied to explain different AI models. The variety of explainers has also given rise to a new challenge: XAI design users find it difficult to know what type of explainers could be the best to implement, taking into account different factors, when they need to explain their own AI models and problems in specific contexts.</p><p>The iSee project tackles this problem, offering a platform that includes several tools for design users to find and build the best explanation strategies to apply in specific situations. The iSee platform is a Case-Based Reasoning (CBR)-driven platform, where users can share, retrieve, and reuse successful explanation experiences already applied in previous situations <ref type="bibr" target="#b2">[3]</ref>. 
An explanation experience is understood as: <ref type="bibr" target="#b0">(1)</ref> the set of needs, preferences, and constraints required to know when applying an XAI strategy; (2) the XAI strategy (the solution), which is a combination of explainers (answering users' questions) that we have represented in a modular fashion through Behaviour Trees (BTs); and (3) the evaluation of these strategies using feedback from end users.</p><p>In this work we present a tutorial/demo of the iSee platform. In the following sections, we first describe the fundamentals of the CBR cycle that drives the platform (Section 2). Later we guide the utilisation of the iSee platform through its components and tools (Section 3), and finally we draw some conclusions of the work (Section 4). At the end of this document (Appendix A), we provide readers with links to iSee online resources.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">The iSee Platform CBR Cycle</head><p>A CBR system is composed of four main steps: retrieval, reuse, revise, and retain. Here, we briefly introduce the iSee CBR cycle process performed when a new use case is created in the platform. Design users will carry out the following process through the iSee platform: they will create their own use case where they are going to include all the information related to the specific scenario that they expect an explanation for (AI model and AI task, user types, users' knowledge, users' intents, etc.). This is the requirements capture step that must be performed before the CBR cycle itself. After that, users will get a tentative list of recommended strategies, which are the XAI strategies successfully applied in the most similar previous situations (cases) that the CBR engine previously stored in its case base. This is the retrieval step. The requirement capture and the retrieval step are done in the iSee cockpit. Moreover, at this point, users have the option of creating a personalised strategy by clicking on a button in the cockpit. The button triggers the generation of an automatically adapted explanation strategy where explainers that address specific questions are collected from neighbouring cases based on the similarity of questions and we call this approach the transformational reuse step <ref type="bibr" target="#b3">[4]</ref>. Thereafter, design users can choose one recommended strategy to apply to their use case (iSee suggests the best recommendation). However, they may need to edit this strategy if it is not fully applicable for their specific use case (for example, the explainers in the strategy might not be applicable to the AI model that they want to explain). Even if the strategy is applicable, design users may want to change the recommended strategy according to their needs in a more personalised way. 
These tasks can be carried out through the Explanation Experiences Editor (iSeeE3) and constitute the constructive reuse step. Getting back to the cockpit, users can then design evaluation questionnaires which will be sent to end users after those end users query and test the explanation strategies. Once the use case requirements and the XAI strategy are ready, design users can publish it as a complete use case, and send the solution to end users for further use. This evaluation is done in the iSee chatbot. End users walk through the explanation results through a chat, also answering the evaluation questionnaire designed by the design user. Finally, design users can access the analytics dashboard, where they can view the results of the questionnaire. This is the revise step and may lead design users to change their strategy if the results are not successful, or may lead them to save the solution (retain step) in our case base for other design users to use it with their own XAI problems if the results are successful.</p><p>The iSee platform also includes other tools and components that help to carry out this process: the iSee ontology and the explainer library. The iSee ontology (iSeeOnto) defines the vocabulary describing an explanation experience. The cockpit and iSeeE3 define the requirements through the iSeeOnto definitions, and get the solutions in the retrieval and reuse steps using the semantic knowledge provided by iSeeOnto. The explainer library, which contains 70 explainers, provides users with the execution of the explainers, allowing them to view the explanation results during the reuse and the revise steps. The platform also allows users to include their own explainers using the cockpit through the description provided by iSeeOnto.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">The iSee Tools</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.">The iSee Ontology</head><p>The iSee Ontology (iSeeOnto)<ref type="foot" target="#foot_0">1</ref>  <ref type="bibr" target="#b4">[5]</ref> is the formalised representation of an explanation experience. It contains all the vocabulary and relationships necessary to define an explanation experience; we hope it may also help the XAI community guide the design of further explanation systems.</p><p>An explanation experience in iSeeOnto is described as a tuple ⟨𝐷, 𝑆, 𝑂⟩. 𝐷 defines the description of the situation that we need to explain. We included the AI model and AI task to explain, the end user profile, questions to answer, intentions, etc., and the explainability requirements. The explainability requirements define the desired type of explainers that needs to be included in the XAI strategy and which will frame the solution. The explainers are also defined in iSeeOnto through concepts well-known in the literature, such as portability, scope, concurrentness, data type, explainer implementation framework, among other features.</p><p>𝑆 is the explanation strategy that fulfils all the requirements included in the description 𝐷. An explanation strategy constitutes the execution of an explainer, or a set of explainers, that fit with the requirements in 𝐷. We have formalised an explanation strategy as a Behaviour Tree (BT) since it is a mathematical model that can execute explainers in a modular fashion.</p><p>Explainers and user questions will be in the leaves of the BT, while other composite nodes (internal nodes) will determine the execution of the explainers. iSeeOnto defines all of these terms and the relationships between them.</p><p>Finally, 𝑂 is the outcome that we obtain when we evaluate 𝑆 with real end users. Therefore, it represents the user satisfaction and 'goodness' with 𝑆 for the problem described in 𝐷.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2.">The Explainer Library</head><p>The explainer library<ref type="foot" target="#foot_1">2</ref> is an API where we expose 70 explainers available "as a service". This library is used in the iSee platform (specifically in the iSeeE3 tool and in the chatbot) every time we need to execute an explainer for design or end users to view the resultant explanation for their use case. The API may be used as an independent tool, since other XAI researchers or practitioners can make use of it, even without using the iSee platform.</p><p>iSee design users can even contribute to the explainer library: either because they want to include an explainer for other iSee users to use, or because they need that explainer as a part of the explanation strategy they want to shape in iSee. To do this, they need to include the explainer code in the iSee explainer library GitHub repository and later link all the explainer semantic information to iSeeOnto through the cockpit. This semantic knowledge, needed to perform the retrieval and reuse steps in iSee, is driven by the concepts modelled in iSeeOnto. In the cockpit, design users can see all the explainers already in the library (and all their semantic knowledge) and there is a button which the design users can select to fill a form where they are going to include the explainer knowledge.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3.">The Evaluation Cockpit</head><p>The cockpit <ref type="foot" target="#foot_2">3</ref> is the tool where design users can access the main functionalities of iSee. First, users need to create an account on iSee, and log in. Once they have an account, they can start using the cockpit. In the cockpit, design users can contribute to the explainer library, as we have seen in Section 3.2, but the main task to perform in the cockpit will be the creation of an explanation experience scenario linked to a specific use case. To do that, users will perform the following process. They will create a use case: they will open a pop-up where they will indicate the name, the domain and the goal of the use case. Once done, a new screen appears, where the users fill a form (that is going to be introduced gradually). This form is the place where they include all the explanation requirements, which are divided into three sections: the AI model settings, the AI model upload, and the user personas. It is important to note that the form is guided by iSeeOnto, i.e., users will select an option in most of the fields regarding the concepts defined in the ontology. This is going to facilitate the retrieval step since iSee can see the similarities between the user requirements and previous cases.</p><p>The AI model setting section includes the AI task to explain, the AI method to explain, the data setting (dataset type, data type, number of features and number of instances), and model performance values (metric type and metric value). In the AI model upload section, users can include the model via an API URL, or via a model file. In this case, users need to include the file, indicating its implementation framework, and a sample dataset file, where they will also indicate the data features, i.e., the type of features and possible values. Within the user persona section, the design users can create multiple personas. 
They have to indicate the persona name and their task domain and AI knowledge levels. Moreover, for each persona, they will add as many questions as they need from a list of questions related to an intent (user goal). These questions are the ones that the user persona expects to get answered by reading the explanation generated by iSee. For each question, the design user will retrieve a recommended list of explanation strategies. To obtain the list of explanation strategies, we make use of CloodCBR<ref type="foot" target="#foot_3">4</ref> , a CBR tool that will compare the use case requirements provided by the design users -added through the cockpit-with the descriptions of previous cases. This list of explanation strategies will frame the XAI strategies of the most similar cases, from the iSee case base, to this specific use case. After retrieving the list, the design users will select one of them (after personalising it using the iSeeE3 tool should they wish to -see Section 3.4, or after personalising the strategy by clicking on a button to carry out the transformational reuse, where the explainers in the strategy are chosen to be more suitable for the questions to be addressed). Then, they will include an evaluation questionnaire to evaluate that strategy for this specific intent and persona (see Figure <ref type="figure" target="#fig_0">1</ref>). The questionnaire might be created by the design user or imported, using state-of-the-art questions that measure the end users' satisfaction, 'goodness' or trust (for example, the Explanation Satisfaction Scale (Hoffman) <ref type="bibr" target="#b5">[6]</ref>). The design users will need to complete this task for all their user personas, and their intents. Once everything is included, the design users will publish the use case. Doing this, they are allowing end users to evaluate the explanation strategy through the cockpit (see Section 3.5).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.4.">The Explanation Experiences Editor</head><p>The Explanation Experiences Editor (iSeeE3)<ref type="foot" target="#foot_4">5</ref>  <ref type="bibr" target="#b6">[7]</ref> is the tool that allows design users to create their own explanation strategies, i.e., build their own BTs. It also allows them to execute the resulting strategy on sample data instances: they can see the AI model prediction for their use case, and the explanations that are going to be retrieved by that explanation strategy. Although design users can create their own BTs from scratch using this tool, the iSeeE3 main functionality is to carry out the CBR constructive reuse step. The explanation strategies recommended by the cockpit might not be wholly applicable, or maybe the design users want to change some elements according to their preferences or needs. iSeeE3 includes several functionalities to help them fix the strategies. Opening the iSeeE3 from the cockpit, the design users can access the recommended strategy (i.e. BT). They can modify it by hand, dragging components (composite nodes, or explainers). However, the reuse process might not be straightforward for design users. So there are two main functionalities within the tool to help users perform the reuse task more automatically: they can substitute individual explainers in the BT, or substitute whole subtrees. Both tasks can be done in a guided or automatic way.</p><p>The non-applicable explainer substitution consists of finding the most similar explainer (from our library) and replacing the explainer that is not applicable by the one that has been retrieved and that is actually applicable for the use case. In the guided mode, the user can pick the explainer substitution from a list of recommendations. In the automatic mode, users will click on a button and the non-applicable explainer will be replaced by the most similar and applicable one. 
The similarities between explainers and the applicability filtering are driven by the semantic knowledge from iSeeOnto. In the same way, users can substitute the whole tree. With the guided mode, users can pick one applicable BT (i.e. a BT where all explainers are applicable) from the list of the most similar BTs recommended by the tool. The recommendation list comes from the case base and it is calculated using a Levenshtein Edit Distance that also includes semantic knowledge from the explainers. With the automatic mode, the BT will be replaced by the one that is most similar to the one to replace, should this replacement be applicable. For both functionalities iSeeE3 also includes a form where users can indicate the type of explainers that they want in their BT. For instance, they might say that they want explainers that generate counterfactual explanations, or explainers that show explanations using heatmaps. Furthermore, the tool includes a button that will advise users whether the resulting BT structure is correct in terms of iSee rules.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5.">The Chatbot and The Analytics Dashboard</head><p>Once a design user has built a specific use case (i.e. once the requirements and the explanation strategies are defined), end users will access the cockpit to evaluate its explanation strategy. The evaluation can be done through two tools: the chatbot and the analytics dashboard.</p><p>The chatbot performs the explanation strategies established by the design user. The chatbot is interactive: end users have to pick from possible answers to the questions that the chatbot asks them. First the end users will start by selecting the user persona that corresponds to them. Second, they will choose whether they want to upload a data instance or use inbuilt sampling method to select a data instance. Third, the chatbot will show the end user a prediction made by the AI model. The AI model is the one uploaded by the design user, and is executed through the dataset also provided by the design user. Then, the chatbot shows the questions established by the design user for that user persona (see an example in Figure <ref type="figure" target="#fig_1">2</ref>). The end user will choose one question, and then the chatbot (following the execution workflow specified in the explanation strategy as a BT) will show the explanations one at a time. The user can choose to ask more questions (if any) or finish the process. In this case, the chatbot will allow users to answer the evaluation questionnaire that the design user has defined during the use case definition, finishing the process.</p><p>The answers from the end users for the evaluation questionnaire, combined with explanation strategy data, are available to the design users in the analytics dashboard. In this dashboard, they can see data such as the total number of interactions with the chatbot for this use case, and the number of interactions by persona. 
Also for each persona, the design users can view the number of interactions with each intent, the explainers performed, the answers to the evaluation questionnaire and the individual experience, i.e. the time spent by each end user in every step provided by the chatbot (explained in the previous paragraph). Together, interactions data allows design users to analyse how explanations were perceived by end users during their user experience.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Conclusions</head><p>The iSee platform is a CBR-driven tool that allows XAI design users to share, reuse and build personalised explanation experiences. Design users can: (1) determine the XAI problem that they need to solve for a specific use case; (2) obtain a personalised recommendation of previous successful explanation strategies for their problem; (3) build and personalise their own explanation strategies; (4) evaluate the resultant explanation strategy for their use case with real end users; and (5) share their explanation experience with other design users. In this work, we have described the iSee platform, and the tools and components that compose it. The main objective of the iSee platform is to be a useful tool for AI researchers and industry practitioners in order to encourage explainable and trustworthy AI. During the iSee project, we have completed three real world use cases successfully using the iSee tools: radiograph fracture detection, sensor anomaly detection, and telecom tasks blockers diagnosis. We also have studied about 10 use cases in other domains. Thanks to these evaluations, we have confirmed the iSee platform utility for the XAI community. In future work, we might look into ethical considerations to make sure users understand the risks involved when using AI systems or the iSee platform. Finally, we expect iSee to become more popular in the following months, so we can try our platform for different use cases, enriching the case base, and providing more different types of solutions, improving the platform itself as a consequence.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Cockpit screen to set the evaluation questionnaire for an intent and persona.</figDesc><graphic coords="5,162.21,84.19,270.85,156.61" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Explanation provided by iSee for a sensor anomaly detection use case.</figDesc><graphic coords="7,151.80,84.19,291.70,160.93" type="bitmap" /></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">Available at: https://github.com/isee4xai/iSeeOnto. Documentation available at https://isee4xai.github.io/iSeeOnto/ docs/explanationexperience-en.html</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_1">Available at: https://github.com/isee4xai/iSeeExplainerLibrary</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_2">Available at: https://cockpit-dev.isee4xai.com/. Code available at https://github.com/isee4xai/iSeeCockpit</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_3">Available at https://github.com/isee4xai/iSeeCloodCBR</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="5" xml:id="foot_4">Code available at: https://github.com/isee4xai/ExplanationExperienceEditor</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>iSee is an EU CHIST-ERA project which received funding for the UK from EPSRC under grant number EP/V061755/1, for Ireland from the Irish Research Council under grant number CHIST-ERA-2019-iSee (with support from Science Foundation Ireland under Grant number 12/RC/2289-P2), for Spain from the MCIN/AEI and European Union "Next Generation EU/PRTR" under grant number PCI2020-120720-2, and for France from ANR under grant number 21-CHR4-0004-01.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Explainable artificial intelligence (xai): motivation, terminology, and taxonomy</title>
		<author>
			<persName><forename type="first">A</forename><surname>Notovich</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Chalutz-Ben Gal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Ben-Gal</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Machine Learning for Data Science Handbook: Data Mining and Knowledge Discovery Handbook</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="971" to="985" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Darpa&apos;s explainable artificial intelligence (xai) program</title>
		<author>
			<persName><forename type="first">D</forename><surname>Gunning</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Aha</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">AI magazine</title>
		<imprint>
			<biblScope unit="volume">40</biblScope>
			<biblScope unit="page" from="44" to="58" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Cbr driven interactive explainable ai</title>
		<author>
			<persName><forename type="first">A</forename><surname>Wijekoon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Wiratunga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Martin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Corsar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Nkisi-Orji</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Palihawadana</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Bridge</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Pradeep</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">D</forename><surname>Agudo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Caro-Martínez</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">International Conference on Case-Based Reasoning</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="169" to="184" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Failure-driven transformational case reuse of explanation strategies in cloodcbr</title>
		<author>
			<persName><forename type="first">I</forename><surname>Nkisi-Orji</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Palihawadana</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Wiratunga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Wijekoon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Corsar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">International Conference on Case-Based Reasoning</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="279" to="293" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Conceptual modelling of explanation experiences through the iseeonto ontology</title>
		<author>
			<persName><forename type="first">M</forename><surname>Caro-Martínez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Wijekoon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Recio-García</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Corsar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Nkisi-Orji</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CEUR Workshop Proceedings</title>
				<imprint>
			<date type="published" when="2023">2023</date>
			<biblScope unit="volume">3389</biblScope>
		</imprint>
	</monogr>
	<note>CEUR Workshop Proceedings</note>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Explaining explanation for &quot;explainable AI&quot;</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">R</forename><surname>Hoffman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Klein</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">T</forename><surname>Mueller</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the human factors and ergonomics society annual meeting</title>
				<meeting>the human factors and ergonomics society annual meeting<address><addrLine>CA; Los Angeles, CA</addrLine></address></meeting>
		<imprint>
			<publisher>SAGE Publications Sage</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="volume">62</biblScope>
			<biblScope unit="page" from="197" to="201" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">iSeeE3-The Explanation Experiences Editor</title>
		<author>
			<persName><forename type="first">M</forename><surname>Caro-Martinez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Darias</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Diaz-Agudo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Recio-Garcia</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">SoftwareX</title>
		<imprint>
			<biblScope unit="volume">21</biblScope>
			<biblScope unit="page">101311</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<monogr>
		<author>
			<persName><forename type="first">A</forename></persName>
		</author>
		<title level="m">Online Resources • iSee webpage • iSee platform •</title>
				<imprint/>
	</monogr>
	<note>GitHub of the iSee project • iSee overview video • iSee YouTube channel • Link to the chatbot to evaluate a sensor anomaly detection use case (incognito mode required) • Online guidelines to evaluation experiments</note>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
