<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Designing XAI-based Computer-aided Diagnostic Systems: Operationalising User Research Methods</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Elsa</forename><surname>Oliveira</surname></persName>
							<email>oliveira@aicos.fraunhofer.pt</email>
							<affiliation key="aff0">
								<orgName type="institution">Fraunhofer Portugal AICOS</orgName>
								<address>
									<addrLine>Rua Alfredo Allen 455/461</addrLine>
									<postCode>4200-135</postCode>
									<settlement>Porto</settlement>
									<country key="PT">Portugal</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Cristiana</forename><surname>Braga</surname></persName>
							<email>cristiana.braga@aicos.fraunhofer.pt</email>
							<affiliation key="aff0">
								<orgName type="institution">Fraunhofer Portugal AICOS</orgName>
								<address>
									<addrLine>Rua Alfredo Allen 455/461</addrLine>
									<postCode>4200-135</postCode>
									<settlement>Porto</settlement>
									<country key="PT">Portugal</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ana</forename><surname>Sampaio</surname></persName>
							<email>ana.sampaio@aicos.fraunhofer.pt</email>
							<affiliation key="aff0">
								<orgName type="institution">Fraunhofer Portugal AICOS</orgName>
								<address>
									<addrLine>Rua Alfredo Allen 455/461</addrLine>
									<postCode>4200-135</postCode>
									<settlement>Porto</settlement>
									<country key="PT">Portugal</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Tiago</forename><surname>Oliveira</surname></persName>
							<email>tiago.oliveira@first-global.com</email>
							<affiliation key="aff1">
								<orgName type="institution">First Solutions - Sistemas de Informação, S.A.</orgName>
								<address>
									<addrLine>Rua Conselheiro Costa Braga</addrLine>
									<settlement>Matosinhos</settlement>
									<country key="PT">Portugal</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Filipe</forename><surname>Soares</surname></persName>
							<email>filipe.soares@aicos.fraunhofer.pt</email>
							<affiliation key="aff0">
								<orgName type="institution">Fraunhofer Portugal AICOS</orgName>
								<address>
									<addrLine>Rua Alfredo Allen 455/461</addrLine>
									<postCode>4200-135</postCode>
									<settlement>Porto</settlement>
									<country key="PT">Portugal</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Luís</forename><surname>Rosado</surname></persName>
							<email>luis.rosado@aicos.fraunhofer.pt</email>
							<affiliation key="aff0">
								<orgName type="institution">Fraunhofer Portugal AICOS</orgName>
								<address>
									<addrLine>Rua Alfredo Allen 455/461</addrLine>
									<postCode>4200-135</postCode>
									<settlement>Porto</settlement>
									<country key="PT">Portugal</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff2">
								<orgName type="institution" key="instit1">Elsa Oliveira</orgName>
								<orgName type="institution" key="instit2">Cristiana Braga</orgName>
								<address>
									<settlement>Ana Sampaio, Tiago Oliveira, Filipe</settlement>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Designing XAI-based Computer-aided Diagnostic Systems: Operationalising User Research Methods</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">A441A9E19AA0656C8AB10670BE0EE634</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-23T22:57+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Explainable AI, Computer-aided detection, Decision Support System, Ophthalmology, Glaucoma, Cytology, Cervical cancer, Retinal Imaging, Microscopy</term>
					<term>0000-0002-7105-9654 (E. Oliveira)</term>
					<term>0000-0002-9384-2252 (C. Braga)</term>
					<term>0000-0003-1770-4429 (A. Sampaio)</term>
					<term>0000-0002-2881-313X (F. Soares)</term>
					<term>0000-0002-8060-831X (L. Rosado)</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>AI technology has the potential to support humans' processes and tasks by augmenting human capabilities and effectiveness. Computer-aided systems have been implemented in healthcare mainly to support clinical decisions. As in other areas, the impact, complexity, and opacity of AI operations have led to the establishment of guidelines for trustworthy AI, which implies being understandable. This study describes the user research work carried out by a multidisciplinary team composed of ML engineers, design researchers, and medical experts, to inform the design of algorithms and user interfaces for two XAI-based clinical decision support tools targeted at Cervical cancer and Glaucoma screening. In particular, we sought to leverage and bridge individual and collective expertise to understand the context, decision-making processes and criteria, and values that frame the respective clinical decisions. The article describes how we operationalised the research activities with expert users and what strategies we followed for subsequent content analysis, ending with the sharing of lessons learned as valuable insights for other research teams interested in designing computer-aided diagnostic systems based on human-centred XAI approaches.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Despite its potential, AI has struggled to be understandable. This requirement has been critical in several areas, mainly in healthcare <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b1">2]</ref>, where AI can support clinical decisions. There has been consensus on the need to promote accountable and trustworthy AI. The European Commission's High-Level Expert Group on Artificial Intelligence (AI HLEG) says that whenever an AI system has a significant impact on people's lives, it should be possible to demand a suitable explanation of the AI system's decision-making process <ref type="bibr" target="#b2">[3]</ref>. These considerations have led AI towards Explainable AI (XAI), which in turn leveraged human-centred design methods to uncover what to explain, why, how, and for whom <ref type="bibr" target="#b3">[4,</ref><ref type="bibr" target="#b4">5,</ref><ref type="bibr" target="#b5">6,</ref><ref type="bibr" target="#b6">7]</ref>. We share a study of how we operationalised Human-Centred Design (HCD) methods to inform the design of algorithms and user interfaces for two XAI-based clinical decision support tools for Cervical cancer and Glaucoma screening. We were concerned with grasping medical experts' mental models and reasoning processes. While mental models are mental constructs that represent a distinct possibility and derive a conclusion from them, reasoning implies a process to derive a conclusion and depends on envisaging the possibilities (mental models) consistent with a starting point <ref type="bibr" target="#b7">[8]</ref>. So, to access the diagnosis' reasoning and identify the decision-making data and the explanation structures to apply in the design of XAI-based clinical decision support tools, we needed to get inside the diagnosis process with those who practice it — the medical experts <ref type="bibr" target="#b8">[9,</ref><ref type="bibr" target="#b10">10]</ref>. This paper is structured into 6 sections. The first section introduces the demand for XAI systems. Section 2 identifies the objectives and design of the study, subdivided into three phases: contextualisation, elicitation, and validation. Section 3 briefly introduces the medical context of Cervical cancer and Glaucoma, on which the work was focused. Section 4 describes how we operationalised the research work focusing on the research activities with the users and the analysis of the collected content. Finally, in section 5 we share lessons learned from this study, and section 6 indicates the main conclusions and future work.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Goals and Study Design</head><p>As a multidisciplinary team, composed of Machine Learning (ML) engineers, design researchers, and medical experts, we sought to leverage and bridge individual and collective expertise, especially from the medical area for which the systems were conceived, to inform the design of algorithms and user interfaces for explainable decision support software targeted at Cervical cancer and Glaucoma screening. We based our study on the results of the user research activities, which aimed to understand the context, processes, and values that frame clinical decisions in the above-mentioned health areas. The research process was guided by three phases: contextualisation, elicitation, and validation. The user research methods applied in each phase (described below) returned a considerable amount of fieldwork materials, i.e., written, verbal and visual content, that researchers needed to analyse to enhance understanding of the data. In analysing these data, we initially focused on codifying what the clinicians said (written transcription) about their decision-making process. However, most of their explanations evoked visual aspects of the images. As we are non-experts, we quickly realised that we needed to match what the doctors were saying with the respective visual elements they were characterising in their explanations. For example, when clinicians explained that a cell was abnormal "because it had a halo around the nucleus", HCD and ML researchers could not understand what a halo was without a visual reference of that cell containing a halo. The content analysis process, based on Transcription, Coding, and Systematisation, paved the way for the decision-making data and inherent reasoning structure.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.">Contextualisation</head><p>As a first step, the researchers sought to become familiarised with the jargon, clinical practices, and decision-making processes used by health professionals. Initially, the researchers carried out preliminary research in online medical articles, also to acquire the basic knowledge to prepare for the interviews with medical experts. In fact, the contextualisation was accomplished mainly through semi-structured interviews whose script applied Task Reflection and Retrospection methods, to prompt participants to reflect on and describe their daily clinical tasks and diagnostic practices. The interviews gave us an overview of clinical practices, decision-making processes, values, and a quick window into participants' mental models as they gave examples of clinical cases and how they decided on them.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">Elicitation</head><p>The elicitation phase asked for more detail on the decision-making process, decision-making data, and on the explanation structures that support it. To this end the research team relied on referenced methods for mental models' elicitation <ref type="bibr" target="#b11">[11]</ref>, such as Semi-structured interviews, Observation, and Think-Aloud <ref type="bibr" target="#b12">[12,</ref><ref type="bibr" target="#b13">13]</ref>, together with co-creation practices -that made use of imaging data and other design materials to facilitate participants in demonstrating the processes of analysis and decision-making. Nielsen refers to the Think-Aloud method as effective in giving us insights into users' mental models regarding a given task. The study also drew on the procedures of a field study method based on Observation and interviews to understand work practices and behaviors -Contextual inquiry <ref type="bibr" target="#b14">[14,</ref><ref type="bibr" target="#b15">15]</ref>. Kim Salazar on the Nielsen Norman Group website highlights the value of the contextual inquiry method -to inquire in context, which results in a collaborative interpretation between researchers and expert users about work practices and behaviors, with a more in-depth understanding of experts' reasoning. With these references in mind, the research team set up workshops to observe and question medical experts analysing and deciding on clinical cases and from clinical data.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3.">Validation</head><p>The validation stage allowed us to discuss, correct, complete, and refine with medical experts the research findings. Through co-creation design practices, researchers designed group and individual workshops, in both remote and in-person versions, in order to display the decision-making criteria within the respective structures, to be discussed and easily edited and iterated in real-time. For some questions, we used the A/B testing method for participants to select the best option.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Cervical cancer and Glaucoma</head><p>As mentioned in the introduction, this study addresses two distinct health areas, Anatomical Pathology and Ophthalmology, more specifically, Cervical cancer and Glaucoma. The main goal of the study was to design an explainable decision support software per area, both based on imaging screening, to be used by medical experts, and physicians in training.</p><p>Cervical cancer screening will mainly rely on cytological microscopic images, while Glaucoma screening on retinal images. The research team needed to go deep into each clinical practice to define the systems' requirements. Table <ref type="table" target="#tab_0">1</ref> lists the main aspects that characterise the two health areas under study, considering the analysis that medical experts carry out per patient. This knowledge Minimum of 5000 squamous cells <ref type="bibr" target="#b18">[18]</ref> (average of 3.8 cells/image). Good image quality.</p><p>Visibility and sharpness of optic nerve and macula. Completeness of temporal arcade. Clear visibility of small vessels. Field of view well illuminated (min. 80%) <ref type="bibr" target="#b19">[19]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Case classification</head><p>Grading by lesion level, according to the Bethesda System's convention <ref type="bibr" target="#b20">[20]</ref> Staging of Glaucoma <ref type="bibr" target="#b21">[21]</ref> Comparative analysis</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Experts take intermediate squamous cells as a reference for comparison</head><p>Experts check the symmetry between the left eye and the right eye</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>End-users Cytopathologists, cytotechnicians (diagnose), and physicians in training</head><p>Glaucomatologists, (diagnose), ophthalmologists, and physicians in training was built-up throughout the contextualisation and elicitation phases. While both areas share common aspects, they also have some significant differences.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Operationalisation of research activities</head><p>In this section, we describe how we operationalised the research activities for the contextualisation, elicitation, and validation phases. At the beginning of the study, all participants received a general informed consent that gave an overview of the user research agenda, being further provided a detailed informed consent per activity. The study involved up to 5 participants per health area -in Cervical cancer, 3 cytopathologists and 2 cytotechnologists; in Glaucoma, 4 ophthalmologists specialised in Glaucoma (glaucomatologists). Because of COVID-19, in particular the restrictions on in-person group meetings and on normal access to hospitals and clinical settings, most user research activities took place remotely through digital and online platforms. Through these, participants were able to access anonymised screening images, as well as other clinical data, to demonstrate their decision-making process, and reasoning, while observed and questioned by the research team.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1.">Contextualisation interviews</head><p>After some basic research through online medical articles, the research team drew up the interview script addressed to the medical experts. The interview script aimed at:</p><p>understanding clinical procedures, i.e., from the first consultation up to and after diagnosis, eliciting medical experts' values, i.e., their motivation for the medical field, examples of impactful cases, and, very important, medical expectations regarding the introduction of AI systems in the clinical practice. The semi-structured interviews were carried out remotely through video call by Microsoft Teams software. Note that in Cervical cancer, the research team took advantage of the results of a previous and related study with cytopathologists and cytotechnicians <ref type="bibr" target="#b22">[22,</ref><ref type="bibr" target="#b20">20]</ref> that had conducted in-person semi-structured interviews with the same participants. These interviews enabled us to understand the processes involved in cytological analysis, from the reception of the sample to the diagnosis.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1.1.">Interviews analysis</head><p>Once the interviews were completed, we transcribed them using oTranscribe software <ref type="bibr" target="#b23">[23]</ref>. We then organised the participants' insights into the main themes raised during the interviews.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.">Workshops for eliciting diagnostic processes</head><p>Familiarised with both medical areas, we inspired ourselves in the contextual inquiry method to design the workshops that would enable us to elicit experts' diagnostic assessment process. Our goal was to understand what experts look at when they analyse a clinical case, specifically, an imaging examination, and what criteria they use to assess whether it is a pathological change.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.1.">Designing remote workshops</head><p>The analysis of imaging examinations was a requirement for the diagnosis assessment, thus, we needed to observe experts analysing such images. Usually, we would visit the experts' workplace and observe them in a real clinical setting. However, due to COVID-19, the workshops had to be remote, and so, we mimicked this observation remotely.</p><p>For Cervical cancer, we asked a cytologist to provide us with images of liquid-based cytological samples. For Glaucoma, we asked a glaucomatologist to provide us with retinal images. However, this was not all. From the interviews, we learned that both medical fields complemented images' interpretation with clinical data, which we were attending to. But we also learned that Glaucomatous pathology was more complex to diagnose, because glaucomatologists often had to integrate complementary diagnostic exams to reach a diagnosis.</p><p>With this in mind, we asked the glaucomatologist to provide us with a set of anonymised complementary diagnostic exams with diverse diagnosis: unconfirmed, borderline, early stage Glaucoma and advanced stage Glaucoma.</p><p>To ensure participants' unbiased decisions, we first conducted individual workshops. Afterwards we ran a group workshop for both medical fields to help identify consensual criteria and foster discussions around the least consensual ones.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.2.">Conducting individual workshops</head><p>Each workshop consisted of one main task: the participant, as a medical expert, would assess imaging examinations in real-time and think aloud about their analysis. This way, we could follow the assessment process and ask questions whenever needed to better understand it. Moreover, we asked participants to annotate relevant findings whose appearance suggested a pathological change and to provide the respective diagnosis classification. Experts in Cervical cancer classified cytological images according to the Bethesda System's convention. Experts in Glaucoma classified retinal images according to the four stages mentioned in section 4.2.1. Each participant analysed from three to seven images consisting of liquid-based cytological samples (in Cervical cancer) or from four to sixteen images consisting of eight pairs of retinal images (in Glaucoma). Figure <ref type="figure" target="#fig_0">1</ref> shows a visual field of a cytological sample with two cells annotated by a participant for their abnormality, and figure 2 a retinal image being analysed by a participant.</p><p>Given the interdependence with other examinations in Glaucoma diagnosis, and the wider range of diagnostic factors outside imaging data, Glaucoma workshops comprised an additional task. Each participant was asked to list the steps of a usual medical procedure, from the first consultation to the diagnosis, describing other relevant examinations beyond the retinal image. Figure <ref type="figure" target="#fig_2">3</ref> shows the timeline filled in by 1 of the 4 participants considering the examinations performed throughout the analysis of a given clinical case (for example, José, 62 years old with high intraocular pressure) -from the first consultation to diagnosis, and, where necessary, in the patient follow-up. 
In the second part of the workshop, the participant accessed anonymised eye examinations, corresponding to different diagnoses, from non-Glaucoma to advanced stage Glaucoma, to then select and analyse the most representative of a specific clinical case. Figure <ref type="figure" target="#fig_1">2</ref> is one of the retinal images that a participant has zoomed in and centred on the optical disc to show which image features reflect the state of the eye's structures and should therefore be considered as a criterion for decision making. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.3.">Transcribing and analysing</head><p>As we transcribed the workshops, it became evident that we should assign textual excerpts to image cut-outs, as most of experts' explanations consisted of descriptions of visible characteristics in the analysed images. Thus, mapping the object of analysis with the respective transcription enabled us to keep a correspondence between what was said and what was being observed in the image (Figure <ref type="figure" target="#fig_3">4</ref>).</p><p>We did this for each participant. Almost all participants, from both medical fields, mentioned how the analysis and conclusions of some clinical cases were subjective. For instance, a glaucomatologist said: "Sometimes it's not black and white, it's grey", meaning that the same examinations and clinical data may lead experts to different decisions. This happens when the available elements for diagnosis are unclear, due to either image characteristics that hinder experts' analysis (e.g. blurry image), or to characteristics of the anatomical structures, which can be themselves confusing (when the same visual appearance can be the result of different possible causes), which requires more tests and more time.</p><p>Moreover, Cervical cancer experts highlighted the subjectivity intra-and inter-observer, explaining that not only the decision may vary between experts, as the same expert could give a different classification to the same sample at different moments in time. Therefore, we sought this subjective dimension by comparing the analysis of each participant to the same object of analysis, and </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.4.">Coding and systematisation</head><p>Once we completed the transcripts, we created a categorisation matrix in Excel to code the data into a set of categories that constitute the building blocks of the explanations, which allowed us to uncover a generic explanation structure suitable for both use cases. We used the columns' headings for the categories, and the rows to list the image that has triggered the explanation together with the textual explanation (quote) and the set of categorisable criteria (Figure <ref type="figure" target="#fig_5">6</ref>).</p><p>As we went on with the codification, we iteratively refined the categories into Key structure(s) examined, Key feature(s) concerned, Risk factor, Not Cervical cancer/Glaucoma factor, Doubt factor, Result attributed, and finally, Key expression used by the expert. As the Excel's content increased, we identified that the criteria we filled in the categories would repeat. So, we created an Excel  tab to list the criteria for each category as they emerged throughout the process. We ended up gathering a list of options that enabled us to streamline the filling-in process. To avoid subjectivity and/or interpretation errors in the process of codification, we organised an internal panel of three coders composed of researchers involved in these activities. All transcriptions were assigned to this panel, varying who would be the first coder. While the first coder would codify the transcription from scratch, the following two would validate the first codification. Taking the following quote as an example from Cervical cancer, we would describe as table <ref type="table" target="#tab_2">2</ref> shows.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>It has a darker nucleus, but with this resolution, when I try to zoom in, I can't see the characteristics.</head><p>Figure <ref type="figure" target="#fig_5">6</ref> shows the variability of decision criteria by category that was raised throughout the analysis.</p><p>By the end of the analysis, we uncovered the most relevant criteria used by experts in each medical field to analyse and explain their decisions. And we could standardise that most of the explanations followed this structure:  </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>e.g. Cervical cancer: The [colour] of the [nucleus] is [hyperchromatic]. Glaucoma: The [optic disc] has an [excavation greater than 0.4].</head><p>Moreover, we found that sentences stating a "not Cervical cancer/Glaucoma factor" or "doubt factor" could follow the Key feature concerned. Experts used them to suggest a plausible contradiction that prevented them from providing a classification of which they were confident. In these explanations, the experts point out a structure that they observed and characterise its aspect — reflecting a well-known and established risk factor in the domain knowledge, i.e., Cervical cytology: [hyperchromatic], Glaucoma: [excavation greater than 0.4]. Nevertheless, the explanations also stress -through the contrastive expression 'however' -other characteristics that complement and contrast the first ones, i.e., Cervical cytology: [there are overlapping cells], Glaucoma: [is symmetric]. And this prevents the experts from discerning with certainty whether the first observed characteristic is an anomaly or not.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>e.g., Cervical cancer: The [colour] of the [nucleus] is [hyperchromatic], however, [there are overlapping cells]. Glaucoma: The [optic disc] has an [excavation greater than 0.4], however, [is symmetric].</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3.">Workshops for validation</head><p>Based on the results of previous user research activities, researchers designed validation workshops to: (i) ensure no conflicting information among the knowledge shared by each participant, (ii) remove possible imprecision from researchers' interpretation and consequent analysis outcomes, and (iii) get insights on a first version of the graphical user interface (GUI) designed from scratch to attend the elicited diagnostic processes.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3.1.">Conducting group workshops</head><p>The first validation session was carried out through the Mural platform, from where participants accessed and interacted (by editing, deleting, or adding content) with the list of decision-making criteria raised so far in order to ensure their correctness and completeness. In Glaucoma workshops, participants were also asked to analyse several examinations, mainly retinal images, and to choose the applicable criteria for each one from the list elicited by researchers. We asked participants to position the selected criteria in one of three possibilities: non-Glaucoma, Glaucoma, or borderline (Figure <ref type="figure" target="#fig_6">7</ref>).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3.2.">Validating content and container -an informed GUI prototype</head><p>In the second validation session, researchers presented the validated decision-making criteria integrated into a Graphical User Interface (GUI) prototype. The aim was to get feedback on the criteria and on the UI components presenting it. According to participants' availability, the Cervical cancer session took place in person (Figure <ref type="figure" target="#fig_7">8</ref>), and the Glaucoma session took place remotely (Figure <ref type="figure" target="#fig_8">9</ref>). Some categories and criteria seemed to have more than one possible way to name or present in the interface. Thus, to assess the correctness and completeness of the data as well as the system's components and related features, we applied A/B testing for participants to choose the best options.</p><p>In Glaucoma study, we conducted a remote session through which we shared a PowerPoint presentation with images of the prototype of the GUI together with its content (the elicited decision-making criteria) listed in an editable text box, as shown in Figure <ref type="figure" target="#fig_8">9</ref>. The content was discussed in real-time and, whenever necessary, easily edited.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Lessons Learned</head><p>L1: Multidisciplinary team The design of XAI-based clinical decision support tools requires extensive knowledge from various domains. It is paramount that teams ensure an iterative communication that keeps all in the loop, i.e., design researchers, medical experts, ML Engineers, etc. Let us highlight ML Engineers' guidance on the feasibility of the required functionalities, their support in defining the needed data, i.e., quantity and quality, and the infrastructure for implementation. Many systems based on supervised learning require annotated data, analysed by experts in terms of elements needed to guide the models' learning process. In the case of medical XAI systems, this requires close cooperation with clinical experts to ensure the annotation of the data instances objectively and uniformly. This way, ML Engineers guarantee that the final data set comprises cases sufficiently representative of the different data properties that may arise in practical scenarios.</p><p>L2. Contextual inquiry method as a basis for elicitation The contextual inquiry method inspired the study to observe experts performing a task as close to reality as possible by having them verbalise their thoughts while analysing imaging examinations and providing diagnostic classifications for them. We conclude that, when in-loco sessions are not possible, researchers can simulate the method remotely using digital and online platforms that enable to: video call, screen sharing, display relevant data for analysis and discussion, and freely write. We asked the experts for analysis materials from their daily work, e.g., anonymised imaging examinations, and then used the online platform Mural to present analysis tasks using these materials. 
While sharing the screen, experts analysed, selected, and annotated the digital images, and researchers asked timely questions that arose from observing what participants were doing and saying (think-aloud).</p><p>L3. Mapping text with images helped associate features to structures From the elicitation to the content analysis, we found it essential to map the textual transcripts with the image that experts were analysing. We cropped, framed, and sketched over the images to correlate what experts were saying with what they were seeing. In doing this, some categories emerged transversely among both experts and images analysed, so this mapping led to discovering a standard structure of the explanations.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>L4. Categorisation matrix for multidisciplinary analysis</head><p>As the categories emerged, we used Excel's functionalities, such as drop-down lists, to streamline the process of matching features to structures, facilitating the systematisation of the analysis across more team members, i.e., design researchers and ML engineers.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.">Conclusions and Future Work</head><p>This paper describes the user research activities carried out by a multidisciplinary team to inform the design of Machine Learning algorithms and user interfaces for two XAI-based computer-aided diagnostic systems for Cervical cancer and Glaucoma. We shared what we think might be useful for other teams involved in the design of Explainable AI systems, namely, ways to operationalise human-centred design methods considering the objectives of Contextualisation, Elicitation, and Validation of such systems. In that scope, we demonstrate transcription, coding, and systematisation strategies that facilitated our content analysis, in particular, a categorisation matrix that helped uncover decision-making criteria and respective explanations' structure to inform the design of AI-generated explanations. Future work will focus on further developing the graphical user interface (GUI) to adapt it to an AI-based classification system to support experts' decision-making process.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Screenshot from the individual workshop for elicitation, conducted remotely through a digital platform showing a digital liquid-based cervical sample, with two of several cells noted by the participant for their abnormality</figDesc><graphic coords="5,89.29,84.19,203.36,203.61" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Screenshot from the individual workshop for elicitation, conducted remotely through a digital platform showing a retinal image being analysed with the participant pointing to the optical nerve to explain what is a pathologic optic disc cupping (excavation)</figDesc><graphic coords="5,302.62,84.19,203.36,203.36" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: Screenshot from the individual workshops, conducted through Mural platform, listing a sequence of steps (required eye examinations) until reaching a diagnosis for a given hypothetical patient</figDesc><graphic coords="6,89.29,84.19,416.71,161.81" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 4 :</head><label>4</label><figDesc>Figure4: Word template setup for transcription aiming at mapping visual content -cells, or other structures -with textual excerpts, while keeping the order of analysis. The process included annotating on the image the object/area analysed and associating the number that corresponds to its order of analysis by the participant.</figDesc><graphic coords="6,89.29,287.23,416.71,199.62" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: An example of the inter-observer comparative analysis carried out in the Cervical cancer study. The same microscopic field of view was analysed by 5 medical experts resulting in different annotations and classifications.</figDesc><graphic coords="7,89.29,84.19,416.71,75.28" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 6 :</head><label>6</label><figDesc>Figure6: Screenshot of the explanations' systematisation in Excel for Glaucoma. In the columns' headings, we may read the categories, left to right: Explanation object, Explanation, Key structure examined, Key feature examined, Glaucoma potential factor, Not Glaucoma factor, Unsatisfactory for analysis, and Confounding factors. Each category was fed according to the criteria identified in the explanations given by the medical experts.</figDesc><graphic coords="7,89.29,200.69,416.71,171.73" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head>Figure 7 :</head><label>7</label><figDesc>Figure 7: Screenshot of the first validation of the clinical decision criteria by the glaucomatologists. At the top, a magnification of a retinal image (right and left eye) centred on the optical disc. Below, 3 tables, one per eye structure: Discs, Neuroretinal ring, and Vessels.To the left of each table, there is the respective list of criteria from which participants were asked to select those observed in the retinal image and associate them to a non-Glaucoma, Glaucoma, or borderline diagnosis. The criteria positioned between the two columns would be considered borderline case criteria.</figDesc><graphic coords="9,130.96,84.19,333.37,203.11" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head>Figure 8 :</head><label>8</label><figDesc>Figure 8: In-person workshop with three cytopathologists and one cytotechnologist to validate the decision-making criteria integrated into a GUI prototype.</figDesc><graphic coords="9,130.96,357.98,333.37,175.31" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head>Figure 9 :</head><label>9</label><figDesc>Figure 9: A PowerPoint slide showing on the right the decision-making criteria list regarding the optical disc, and on the left side a GUI prototype showing an annotated retinal image with an open dropdown menu component showing part of the decision-making criteria list.</figDesc><graphic coords="10,130.96,84.19,333.36,174.01" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>Table 1</head><label>1</label><figDesc>Characteristics of Cervical cancer and Glaucoma screening</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head>Cytology: Cervical cancer Ophthalmology: Glaucoma</head><label></label><figDesc></figDesc><table><row><cell>Purpose</cell><cell>Screening for reversible gynaecological</cell><cell>Screening for irreversible eye disease</cell></row><row><cell></cell><cell>disease</cell><cell></cell></row><row><cell>Main Imaging Data</cell><cell>Cervical cytology specimen (liquid-based)</cell><cell>Color Fundus Photography</cell></row><row><cell>collection</cell><cell></cell><cell></cell></row><row><cell>Complementary exams</cell><cell>HPV diagnosis</cell><cell>Ocular anatomy (e.g., narrow angle), IOP</cell></row><row><cell></cell><cell></cell><cell>(intraocular pressure), Corneal tomography</cell></row><row><cell></cell><cell></cell><cell>for pachymetry, volume and depth, Retinal</cell></row><row><cell></cell><cell></cell><cell>Nerve Fiber Layer Thickness (RNFLT)</cell></row><row><cell></cell><cell></cell><cell>measured via OCT, and visual field tests.</cell></row><row><cell>Patient data</cell><cell>(Not mandatory) Age, last menstruation,</cell><cell>Age, ethnicity, family history of the</cell></row><row><cell></cell><cell>contraceptive method, relevant medical</cell><cell>condition, associated pathologies (e.g.,</cell></row><row><cell></cell><cell>therapeutics, e.g., hormonal, chemotherapy</cell><cell>diabetes, cataracts) and risk medication</cell></row><row><cell></cell><cell></cell><cell>(e.g., antidepressant)</cell></row><row><cell>Digitalisation outcome -</cell><cell>Approximately 100 microscopic images per</cell><cell>Between 1 and 7 retinal fundus images per</cell></row><row><cell>Artefact of analysis</cell><cell>sample</cell><cell>eye [16, 17]</cell></row></table><note>Variation: Each image represents a small section of the entire sample / Each image can vary in eye laterality (left or right) or field of view. Image navigation: The expert browses, image by image, zooming in and out, to identify cells with abnormal aspect / The expert checks an image individually, zooming in and out, to look for abnormalities in the main structures. Criteria of adequacy: Representation of the Transformation Zone.</note></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head>Table 2</head><label>2</label><figDesc>Explanation categorisation example</figDesc><table><row><cell>Key area of image examined</cell><cell>It has a darker nucleous (Part of a cell)</cell></row><row><cell>Key structure(s) examined</cell><cell>nucleous (Nucleous)</cell></row><row><cell>Key feature(s) concerned</cell><cell>a darker nucleous (Colour intensity)</cell></row><row><cell>Risk factor</cell><cell>a darker nucleous (Hyperchromasia)</cell></row><row><cell cols="2">Not Cervical cancer / Glaucoma factor Not applicable</cell></row><row><cell>Doubt factor</cell><cell>but with this resolution,... I can't see the characteristics (Image quality -Blurred)</cell></row><row><cell>Assigned result</cell><cell>... I can't see the characteristics Insufficient / No classification</cell></row></table></figure>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>We would like to thank the medical experts from the Anatomical Pathology Service of the Portuguese Oncology Institute -Porto (IPO-Porto) and from the University Hospital Centre of Porto (CHPorto), who participated in the user research sessions. A special thanks to our senior colleagues at Fraunhofer Portugal AICOS, Ana Barros and Francisco Nunes, who mentored us during the writing of the article. Finally, this work was financially supported by the project Transparent Artificial Medical Intelligence (TAMI), co-funded by Portugal 2020 framed under the Operational Programme for Competitiveness and Internationalization (COMPETE 2020), Fundação para a Ciência e a Tecnologia (FCT), Carnegie Mellon University, and European Regional Development Fund under Grant 45905.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Explainable artificial intelligence: A survey</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">K</forename><surname>Došilović</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Brčić</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Hlupić</surname></persName>
		</author>
		<idno type="DOI">10.23919/MIPRO.2018.8400040</idno>
	</analytic>
	<monogr>
		<title level="m">41st International Convention on Information and Communication Technology, Electronics and Microelectronics</title>
				<meeting><address><addrLine>MIPRO)</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2018">2018. 2018</date>
			<biblScope unit="page" from="210" to="215" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Explainable artificial intelligence for neuroscience: Behavioral neurostimulation</title>
		<author>
			<persName><forename type="first">J.-M</forename><surname>Fellous</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Sapiro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rossi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Mayberg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ferrante</surname></persName>
		</author>
		<idno type="DOI">10.3389/fnins.2019.01346</idno>
		<ptr target="https://www.frontiersin.org/articles/10.3389/fnins.2019.01346" />
	</analytic>
	<monogr>
		<title level="j">Frontiers in Neuroscience</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<author>
			<orgName>European Commission</orgName>
		</author>
		<ptr target="https://ec.europa.eu/futurium/en/ai-alliance-consultation.1.html" />
		<title level="m">Ethics Guidelines for Trustworthy AI -FUTURIUM -European Commission</title>
				<imprint>
			<date type="published" when="2021-10-13">2021. 13. Oct. 2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Peeking inside the black-box: A survey on explainable artificial intelligence (xai)</title>
		<author>
			<persName><forename type="first">A</forename><surname>Adadi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Berrada</surname></persName>
		</author>
		<idno type="DOI">10.1109/ACCESS.2018.2870052</idno>
	</analytic>
	<monogr>
		<title level="j">IEEE Access</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page" from="52138" to="52160" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">A Survey on the Explainability of Supervised Machine Learning</title>
		<author>
			<persName><forename type="first">N</forename><surname>Burkart</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">F</forename><surname>Huber</surname></persName>
		</author>
		<idno type="DOI">10.1613/jair.1.12228</idno>
	</analytic>
	<monogr>
		<title level="j">J. Artif. Intell. Res</title>
		<imprint>
			<biblScope unit="volume">70</biblScope>
			<biblScope unit="page" from="245" to="317" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<monogr>
		<title level="m" type="main">Question-driven design process for explainable ai user experiences</title>
		<author>
			<persName><forename type="first">Q</forename><forename type="middle">V</forename><surname>Liao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pribić</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Han</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Miller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Sow</surname></persName>
		</author>
		<idno type="DOI">10.48550/ARXIV.2104.03483</idno>
		<ptr target="https://arxiv.org/abs/2104.03483" />
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Xai systems evaluation: A review of human and computer-centred methods</title>
		<author>
			<persName><forename type="first">P</forename><surname>Lopes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Silva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Braga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Oliveira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Rosado</surname></persName>
		</author>
		<idno type="DOI">10.3390/app12199423</idno>
		<ptr target="https://www.mdpi.com/2076-3417/12/19/9423" />
	</analytic>
	<monogr>
		<title level="j">Applied Sciences</title>
		<imprint>
			<biblScope unit="volume">12</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Mental models and human reasoning</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">N</forename><surname>Johnson-Laird</surname></persName>
		</author>
		<idno type="DOI">10.1073/pnas.1012933107</idno>
	</analytic>
	<monogr>
		<title level="j">Proc. Natl. Acad. Sci. U.S.A</title>
		<imprint>
			<biblScope unit="volume">107</biblScope>
			<biblScope unit="page" from="18243" to="18250" />
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">C</forename><surname>Rickheit</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Gert</forename></persName>
		</author>
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<monogr>
		<title level="m" type="main">Mental Models in Discourse Processing and Reasoning</title>
		<author>
			<persName><surname>Habel</surname></persName>
		</author>
		<ptr target="https://books.google.pt/books?hl=pt-PT&amp;lr=&amp;id=96jBqz_ar8AC&amp;oi=fnd&amp;pg=PP1&amp;dq=mental+models+versus+reasoning+process&amp;ots=Ou3b1SOv77&amp;sig=r5NouxMzR56klQTrokyvHSclJuQ&amp;redir_esc=y#v=onepage&amp;q=mental%20models%20versus%20reasoning%20process&amp;f=false" />
		<imprint>
			<date type="published" when="1999">1999</date>
			<publisher>Elsevier Science B.V</publisher>
			<pubPlace>Amsterdam</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Mental models, visual reasoning and interaction in information visualization: A top-down perspective</title>
		<author>
			<persName><forename type="first">Z</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Stasko</surname></persName>
		</author>
		<idno type="DOI">10.1109/TVCG.2010.177</idno>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Visualization and Computer Graphics</title>
		<imprint>
			<biblScope unit="volume">16</biblScope>
			<biblScope unit="page" from="999" to="1008" />
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">The Importance of Mental Models in Implementation Science</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Holtrop</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">D</forename><surname>Scherer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">D</forename><surname>Matlock</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">E</forename><surname>Glasgow</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">A</forename><surname>Green</surname></persName>
		</author>
		<idno type="DOI">10.3389/fpubh.2021.680316</idno>
	</analytic>
	<monogr>
		<title level="j">Front. Public Health</title>
		<imprint>
			<biblScope unit="volume">9</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">&apos;it&apos;s reducing a human being to a percentage&apos;: Perceptions of justice in algorithmic decisions</title>
		<author>
			<persName><forename type="first">R</forename><surname>Binns</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Van Kleek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Veale</surname></persName>
		</author>
		<author>
			<persName><forename type="first">U</forename><surname>Lyngs</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Shadbolt</surname></persName>
		</author>
		<idno type="DOI">10.1145/3173574.3173951</idno>
		<idno>doi:10.1145/3173574.3173951</idno>
		<ptr target="https://doi.org/10.1145/3173574.3173951" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, CHI &apos;18</title>
				<meeting>the 2018 CHI Conference on Human Factors in Computing Systems, CHI &apos;18<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="1" to="14" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Explanatory debugging: Supporting end-user debugging of machine-learned programs</title>
		<author>
			<persName><forename type="first">T</forename><surname>Kulesza</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Stumpf</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Burnett</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W.-K</forename><surname>Wong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Riche</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Moore</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Oberst</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Shinsel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Mcintosh</surname></persName>
		</author>
		<idno type="DOI">10.1109/VLHCC.2010.15</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE Symposium on Visual Languages and Human-Centric Computing</title>
				<imprint>
			<date type="published" when="2010">2010. 2010</date>
			<biblScope unit="page" from="41" to="48" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Complementing a Clinical Trial With Human-Computer Interaction: Patients&apos; User Experience With Telehealth</title>
		<author>
			<persName><forename type="first">S</forename><surname>Jalil</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Myers</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Atkinson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Soden</surname></persName>
		</author>
		<idno type="DOI">10.2196/humanfactors.9481</idno>
	</analytic>
	<monogr>
		<title level="j">JMIR Human Factors</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page">e9481</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<monogr>
		<author>
			<persName><forename type="first">T</forename><surname>Dagdelen</surname></persName>
		</author>
		<ptr target="https://www.diva-portal.org/smash/record.jsf?pid=diva2%3A1366483&amp;dswid=8599" />
		<title level="m">Modernizing the User Interface of a Legacy System at the Swedish Police Authority : Collaborative Mental Model: A New Participatory Design Method</title>
				<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<monogr>
		<author>
			<orgName>Directorate-General of Health of Portugal</orgName>
		</author>
		<ptr target="https://normas.dgs.min-saude.pt/2018/09/13/rastreio-da-retinopatia-diabetica/" />
		<title level="m">Rastreio da retinopatia diabética - portal das normas clínicas</title>
				<imprint>
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Comparison of Early Treatment Diabetic Retinopathy Study Standard 7-Field Imaging With Ultrawide-Field Imaging for Determining Severity of Diabetic Retinopathy</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">P</forename><surname>Aiello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Odia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">R</forename><surname>Glassman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Melia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">M</forename><surname>Jampol</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">M</forename><surname>Bressler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Kiss</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">S</forename><surname>Silva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">C</forename><surname>Wykoff</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">K</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">R C R</forename><surname>Network</surname></persName>
		</author>
		<idno type="DOI">10.1001/jamaophthalmol.2018.4982</idno>
		<idno>arXiv:30347105</idno>
	</analytic>
	<monogr>
		<title level="j">JAMA Ophthalmol</title>
		<imprint>
			<biblScope unit="volume">137</biblScope>
			<biblScope unit="page" from="65" to="73" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<monogr>
		<title level="m" type="main">Criteria for adequacy of a cervical cytology sample | Eurocytology</title>
		<author>
			<persName><surname>Eurocytology</surname></persName>
		</author>
		<ptr target="-line" />
		<imprint>
			<date type="published" when="2022-10-13">2022. 13. Oct. 2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Implementation and evaluation of a mobile retinal image acquisition system for screening diabetic retinopathy: Study protocol</title>
		<author>
			<persName><forename type="first">S</forename><surname>Rêgo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Monteiro-Soares</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Dutra-Medeiros</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Soares</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">C</forename><surname>Dias</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Nunes</surname></persName>
		</author>
		<idno type="DOI">10.3390/diabetology3010001</idno>
		<ptr target="https://www.mdpi.com/2673-4540/3/1/1" />
	</analytic>
	<monogr>
		<title level="j">Diabetology</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page" from="1" to="16" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">A Review of Computational Methods for Cervical Cells Segmentation and Abnormality Classification</title>
		<author>
			<persName><forename type="first">T</forename><surname>Conceição</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Braga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Rosado</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J M</forename><surname>Vasconcelos</surname></persName>
		</author>
		<idno type="DOI">10.3390/ijms20205114</idno>
	</analytic>
	<monogr>
		<title level="j">Int. J. Mol. Sci</title>
		<imprint>
			<biblScope unit="volume">20</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">OCTA Multilayer and Multisector Peripapillary Microvascular Modeling for Diagnosing and Staging of Glaucoma</title>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">A</forename><surname>De Jesus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">S</forename><surname>Brea</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">B</forename><surname>Breda</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Fokkinga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Ederveen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Borren</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Bekkers</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pircher</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Stalmans</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Klein</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Van Walsum</surname></persName>
		</author>
		<idno type="DOI">10.1167/tvst.9.2.58</idno>
	</analytic>
	<monogr>
		<title level="j">Trans. Vis. Sci. Tech</title>
		<imprint>
			<biblScope unit="volume">9</biblScope>
			<biblScope unit="page">58</biblScope>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<monogr>
		<title level="m" type="main">Computer-aided cervical cancer screening</title>
		<author>
			<persName><surname>Clare</surname></persName>
		</author>
		<ptr target="https://www.aicos.fraunhofer.pt/en/our_work/projects/clare.html" />
		<imprint>
			<date type="published" when="2023-02-15">2023. 15. Feb. 2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<monogr>
		<title level="m" type="main">oTranscribe: A free web app to take the pain out of transcribing recorded interviews</title>
		<author>
			<persName><forename type="first">E</forename><surname>Bentley</surname></persName>
		</author>
		<ptr target="https://otranscribe.com/" />
		<imprint>
			<date type="published" when="2023-02-15">2023. 15. Feb. 2023</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
