<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Towards AI-driven Next Generation Personalized Healthcare and Well-being</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Fatih</forename><surname>Aksu</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Biomedical Sciences</orgName>
								<orgName type="institution">Humanitas University</orgName>
								<address>
									<settlement>Milan</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Alessandro</forename><surname>Bria</surname></persName>
							<affiliation key="aff1">
								<orgName type="department">Department of Electrical and Information Engineering</orgName>
								<orgName type="institution">University of Cassino and Southern Latium</orgName>
								<address>
									<settlement>Cassino</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Alice</forename><forename type="middle">Natalina</forename><surname>Caragliano</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Camillo</forename><forename type="middle">Maria</forename><surname>Caruso</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Wenting</forename><surname>Chen</surname></persName>
							<affiliation key="aff3">
								<orgName type="institution">City University of Hong Kong</orgName>
							</affiliation>
						</author>
						<author role="corresp">
							<persName><forename type="first">Ermanno</forename><surname>Cordelli</surname></persName>
							<email>e.cordelli@unicampus.it</email>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Omar</forename><surname>Coser</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Advanced Robotics and Human-Centered Technologies</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Arianna</forename><surname>Francesconi</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Leonardo</forename><surname>Furia</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Valerio</forename><surname>Guarrasi</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Giulio</forename><surname>Iannello</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Clemente</forename><surname>Lauretti</surname></persName>
							<affiliation key="aff4">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Advanced Robotics and Human-Centered Technologies</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Guido</forename><surname>Manni</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Advanced Robotics and Human-Centered Technologies</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Giustino</forename><surname>Marino</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Domenico</forename><surname>Paolo</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Filippo</forename><surname>Ruffini</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Linlin</forename><surname>Shen</surname></persName>
							<affiliation key="aff5">
								<orgName type="institution">Shenzhen University</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Rosa</forename><surname>Sicilia</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Paolo</forename><surname>Soda</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff6">
								<orgName type="department" key="dep1">Department of Diagnostics and Intervention, Radiation Physics</orgName>
								<orgName type="department" key="dep2">Biomedical Engineering</orgName>
								<orgName type="institution">Umeå University</orgName>
								<address>
									<country key="SE">Sweden</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Christian</forename><surname>Tamantini</surname></persName>
							<affiliation key="aff4">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Advanced Robotics and Human-Centered Technologies</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Matteo</forename><surname>Tortora</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Zhuoru</forename><surname>Wu</surname></persName>
							<affiliation key="aff5">
								<orgName type="institution">Shenzhen University</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Loredana</forename><surname>Zollo</surname></persName>
							<affiliation key="aff4">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Unit of Advanced Robotics and Human-Centered Technologies</orgName>
								<orgName type="institution">University Campus Bio-Medico of Rome</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Towards AI-driven Next Generation Personalized Healthcare and Well-being</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">B5E3FD61B253F5692236D4D4EE209F44</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T16:55+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Artificial Intelligence</term>
					<term>Multimodal Learning</term>
					<term>Precision Medicine</term>
					<term>Stress Detection</term>
					<term>Resilient AI</term>
					<term>Healthcare Robotics</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>In the last few years, Artificial Intelligence (AI) has been emerging as a game changer in many areas of society and, in particular, its integration in medicine heralds a transformative approach towards personalized healthcare and well-being, promising significant improvements in diagnostic precision, therapeutic outcomes, and patient care. Our research explores the cutting-edge realms of multimodal AI, resilient AI, and healthcare robotics, aiming to harness the synergy of diverse data modalities and advanced computational models to redefine healthcare paradigms. This multidisciplinary effort seeks to bridge technology and clinical practice, advancing AI-driven next generation personalized healthcare and well-being.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Artificial Intelligence (AI) has proven itself as an enabling factor for triggering great transformations of society <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b1">2,</ref><ref type="bibr" target="#b2">3,</ref><ref type="bibr" target="#b3">4]</ref>. However, on the verge of the fifth industrial revolution, there are several challenges that involve the consolidation of AI's arrival in sectors such as medicine and people's well-being. Indeed, this paradigm shift towards AI-driven healthcare is not just a technological revolution; it represents a comprehensive reimagining of medical practices, enhancing the quality, efficiency, and accessibility of healthcare services. In this scenario, our efforts are directed towards four research paths: (i) multimodal AI for precision medicine (section 2); (ii) multimodal AI to foster well-being (section 3); (iii) resilient AI (section 4); (iv) AI in robotics for healthcare (section 5). For each of these routes we provide a brief description of the developed solutions, highlighting solved problems and open challenges.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Multimodal AI enables precision medicine</head><p>The evolution of precision medicine marks a paradigm shift from the traditional "one-size-fits-all" approach in healthcare towards tailored therapeutic strategies that account for individual variability in genes, environment, and lifestyle. In this context, leveraging the variety of patient-generated data (such as images, clinical data, electronic health records, etc.) can provide a significant boost to unlocking a holistic view of the patient. Towards this end, multimodal AI provides the ultimate tool <ref type="bibr" target="#b4">[5,</ref><ref type="bibr" target="#b5">6]</ref>: the integration is not merely additive, it's transformative, enabling the extraction of insights that would remain obscured under traditional, unimodal analysis. We are currently studying the potential of multimodal AI for precision medicine facing different challenges in different application domains: in the oncological domain we face challenges regarding data fusion and representation, with two projects on Non-Small Cell Lung Cancer (NSCLC) (sections 2.1 and 2.2). As a prior contribution, EHRs are vital resources for documenting patient clinical history and procedures, but are often challenging to process due to their unstructured nature. Natural Language Processing (NLP) tools, particularly Named Entity Recognition (NER) with the use of Transformer-based models, have proven effective in extracting meaningful information from EHRs <ref type="bibr" target="#b6">[7]</ref>. Transformers excel at capturing contextual relationships between words and the still not thoroughly explored contextual embeddings they create can enhance the understanding of the content itself. We propose the Hierarchical Embedding Attention for overall survivaL (HEAL), a methodology that leverages multi-class NER-driven representations from EHRs by weighting them with attentional mechanisms.
The ability to emphasize clinically relevant information within unstructured data, operating both at word and sentence levels, makes HEAL more interpretable for medical applications. In an NSCLC Overall Survival (OS) prediction case study, HEAL achieved an average 𝐶𝑡𝑑-index of 0.639 and a low standard deviation of 0.014 over 5 runs, showing a statistically significant superiority with respect to manually extracted clinical features.</p><p>Our second contribution, even if still at its preliminary steps, is grounded in the fact that deep learning (DL) approaches have demonstrated significant value in automatically learning potentially relevant patterns from medical images, such as computed tomography (CT) <ref type="bibr" target="#b7">[8]</ref>. Hence, in this study we explore a novel methodology for predicting OS in NSCLC patients using only CT images, aiming at a multitask architecture that encompasses prognostic factors like Progression-Free Survival (PFS) beyond predicting OS alone. The first steps in this direction include producing a soft attention weighted feature map for each input slice and highlighting the relevant slices crucial for predicting OS outcome.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">PICTURE</head><p>PICTURE stands for "Pathological response AI-driven prediCTion after neoadjUvant theRapiEs in NSCLC". This project is based on the central hypothesis that heterogeneous medical data (i.e. radiological images, histology images, cytology and molecular data and EHRs) are consistent with the pathological complete response (pCR), so their combination using artificial intelligence (AI) can provide accurate pCR prediction in NSCLC patients. Indeed, although treating locally advanced NSCLC surgically is the mainstay, it is important to prevent post-surgery recurrence, and neoadjuvant therapy (NAT) has shown potential in enhancing overall survival rates and achieving a complete pathological response, that, if correctly evaluated before the treatment, can even avoid unnecessary surgical resections.</p><p>PICTURE pursues three objectives: (i) pCR prediction through radiology imaging, histology, cytology, molecular data, EHRs, and their combination; (ii) leveraging multimodal deep learning to make the performance of AI resilient and robust for pCR prediction signature; (iii) improving trust and transparency using explainable AI models. PICTURE also has the exploratory aim of transferring trained models to predict pCR for patients undergoing chemoimmunotherapy, tailoring treatments to the individual needs of patients.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3.">Facing imbalance in Alzheimer's Disease diagnosis and prognosis</head><p>Alzheimer's disease (AD) is a progressive neurodegenerative condition with decline in cognitive function, and because of the lack of a cure, its early detection is paramount. Despite the recent progress in AI, challenges such as class imbalance, integration of multimodal data, and robust generalization remain pervasive. In response to this, we introduce a novel methodology that leverages the strengths of ensemble learning while incorporating advanced fusion techniques. For each of the 4 modalities of the tabular ADNI database, we train a series of classifiers on varied class distributions followed by a late fusion strategy that integrates the different modalities to improve the results. Our framework is evaluated on two diagnostic tasks (binary and ternary) and four binary prognostic tasks (at 12, 24, 36, and 48 months) and compared with 12 state-of-the-art imbalanced data algorithms, achieving 97.04% g-mean on the binary diagnostic task and 90.81% g-mean on the 48-month prognostic task.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.4.">Multi-Dataset Multi-Task Learning for COVID-19 Prognosis</head><p>In the COVID-19 context <ref type="bibr" target="#b8">[9]</ref>, in order to fight the scarcity of large, labelled chest radiographic images (CXR) datasets, we introduce a novel multi-dataset multi-task (MDMT) training framework, by integrating correlated datasets from disparate sources and assessing severity score to classify prognostic severity groups <ref type="bibr" target="#b9">[10,</ref><ref type="bibr" target="#b10">11,</ref><ref type="bibr" target="#b11">12]</ref>, instead of relying on datasets with multiple and correlated labelling schemes. As illustrated in Figure <ref type="figure" target="#fig_0">1</ref>, a deep CNN takes the images as input and branches into task-specific fully connected output networks, to end with a multi-task loss function incorporating an indicator function to exploit multi-dataset integration. Proceeding with a 5-fold cross-validation and leave-one-center-out training, we evaluated the method across 18 different CNN backbones on the prognosis classification task and the fine-tuning from the BRIXIA dataset to the AIforCOVID dataset task. Best average performance with statistical robustness achieved: 68.6% accuracy, 66.6% F1-score and 68.5% g-mean for the 5-fold cross-validation, and 65.7% accuracy, 64.3% F1-score and 66.0% g-mean for the leave-one-center-out validation strategy. Future directions include new domains and the integration of XAI <ref type="bibr" target="#b5">[6]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Multimodal AI to foster well-being</head><p>Stress, a response to physical and emotional demands, is crucial in determining individuals' well-being and, if unmanaged, can lead to conditions such as anxiety, depression and cardiovascular diseases. Also in this scenario, multimodal AI offers a tool for a proactive approach to health management, in order to provide real-time monitoring and interventions, thereby mitigating long-term health risks associated with chronic stress. We are targeting stress detection from two perspectives: first, we are focusing on maximising the stress level prediction accuracy within the shortest possible time, exploiting multimodal physiological time series data and Deep Reinforcement Learning (DRL); second, we are further expanding the multimodal view integrating information from video, audio and text with the physiological data. Robust and fast stress detection approaches can bring benefit in several contexts: from providing a targeted and more personal assistance to patients, to ensuring safety for workers, for instance, Air Traffic Controllers (ATC) that endure high levels of psychological pressure during their job impacting operational safety.</p><p>Our first approach <ref type="bibr" target="#b12">[13]</ref> employs a new DRL model to identify stress indicators. We obtained this by leveraging a dynamic time observation window that expands at each step of the learning process, asking the agent to choose either to continue observing or to classify based on the information gathered until that point, trying to minimize the amount of data required for decision-making. As depicted in Figure <ref type="figure" target="#fig_2">2</ref>, we adopted the Soft Actor-Critic algorithm for its effectiveness in handling continuous action spaces.
In a Leave-One-Subject-Out approach with data augmentation on the Non-EEG public dataset we outperformed existing solutions, showing the power of DRL for early stress detection.</p><p>On top of this approach we are exploring the larger multimodal asset of the Ulm-Trier Social Stress Test dataset (ULM-TSST, MuSe 2022 challenge), containing 41 training, 14 validation and 14 test subjects, simulating a job interview scenario, with audio, video, text, and physiological data modalities, rated on arousal and valence stress parameters. The aim is to build a high-performance architecture that leverages non-invasive modalities for stress detection that can be employed in work environments. In both cases, the scarcity of large datasets that provide a quantitative measurement of stress is still the main challenge: we will try to face it considering the construction of robust and specific acquisition protocols to test the effectiveness of the developed approaches in real-world scenarios.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Resilient AI</head><p>Due to the high stakes involved in healthcare decisions, the sensitivity of medical data, and the complexity of medical environments, AI systems should be designed to maintain their intended performance and integrity in the face of adversities, such as data corruption, missing data, privacy leakage, or unexpected changes in their operating environment. This is the goal of Resilient AI, which is an aspect that cannot be left out when the aim is to integrate AI for augmenting the medical practice.</p><p>To meet this goal we are currently investigating three main aspects that fall under the Resilient AI umbrella: developing systems robust to missing data, addressing the challenge of datasets of limited extension, and protecting sensitive patient data.</p><p>With respect to the missing data challenge <ref type="bibr" target="#b13">[14]</ref>, although a variety of strategies exist for addressing this problem in health datasets, to overcome the obstacle of selecting the most suitable one and their dependency on the dataset's specifics, we developed a Transformer-based model <ref type="bibr" target="#b14">[15]</ref> that applies masking to ignore the missing data, thus eliminating the need for imputation and deletion techniques and focusing directly only on the available features through self-attention. Moreover, we introduced a novel feature-identifying form of positional encoding to facilitate the integration of tabular data into a Transformer framework.
This method was validated through an overall survival classification task, employing clinical data from the CLARO <ref type="bibr" target="#b15">[16]</ref> project and improving the prediction accuracy.</p><p>In order to address the problem of working with datasets limited in extension, particularly frequent in the healthcare domain, Triplet networks, a subtype of the Siamese networks, emerge as a promising solution, comprising three identical networks operating concurrently. Throughout training of these three networks, two inputs belong to the same class, whereas the third belongs to a distinct class, with the final objective to develop a feature space with two distinct clusters, one per class, by incorporating inter-class diversities alongside intra-class similarities and providing scenarios with limited data with more triplets compared to instances (Figure <ref type="figure" target="#fig_3">3</ref>). In our study <ref type="bibr" target="#b16">[17]</ref>, using a private dataset of 86 CT scans, triplet networks surpass the plain deep networks in accurately predicting the histological subtypes of NSCLC patients. Currently, we are broadening the scope of our research including PET images alongside CT scans and adopting a multimodal strategy for the same classification. By integrating these complementary data we anticipate achieving a significant improvement and overcoming the challenges posed by limited data scenarios. Last but not least, the challenge related to patient privacy led us to explore Federated Learning (FL). FL presents an innovative solution to the challenge of protecting sensitive patient data in artificial intelligence applications in healthcare, in fact enabling the training of a shared global model with a central server while ensuring data privacy within local institutions. On this basis we introduce a new token-based FL paradigm, revolutionizing the traditional approach with sequential or random passing of a token between clients during each epoch.
This innovative method allows only the token owner to send the weights to the server, which redistributes them directly to all models. By eliminating local training epochs and allowing immediate transmission, this paradigm shift streamlines the process by circulating a single model among clients and also mitigating the need for an initial warm-up period, potentially paving the way for a decentralized system that reduces dependence on a central server and minimizes the number of parameters transmitted in each iteration. Results on the tabular part of the AIforCOVID dataset <ref type="bibr" target="#b17">[18]</ref> composed of 6 hospitals show that the performance of the FL model does not deviate from that of its equivalent trained on all datasets aggregated into a single pool. The next steps will focus on integrating other modalities into the FL pipeline, such as CXR scans of the AIforCOVID dataset itself.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">AI for healthcare robotics</head><p>The integration of robotics in healthcare settings exemplifies another dimension of AI's impact, automating routine tasks, assisting in surgeries with precision beyond human capability, and providing rehabilitation support to patients. This not only enhances service delivery but also alleviates the workload on healthcare professionals, allowing them to focus more on patient-centered care. In this scenario we pursue two aims: first, enhancing robotic surgery with real-time high-precision localization and mapping; second, advancing lower limb robotics for assistance and rehabilitation. For the first objective we focus on the laparoscopy use case, as one of the preferred surgical methods. Despite recent advancements in image acquisition, laparoscopy is still limited by relying on a 2D image view: misinterpreting anatomical structures due to this limit is a common source of errors. In contrast, 3D imaging increases the accuracy of instrument manipulation, leads to better outcomes in surgery, and shortens the learning curve for trainees. Even though several research directions in surgical 3D imaging have been explored, such as camera-based tracking and mapping, Mosaicking, Structure from Motion, and Shape from Template, they often rely on simplifications that can limit their effectiveness. On this ground, Simultaneous Localization and Mapping (SLAM) has shown promising results, as it aims to create a map of the environment while localizing the sensor position within it. Therefore, we developed a robust deep learning SLAM pipeline to operate in real-time across diverse surgical settings by providing an immersive, interactive 3D environment (Figure <ref type="figure" target="#fig_4">4</ref>), allowing for more precise and personalized interventions with the future possibility to be integrated with augmented reality displays.</p><p>For the second objective, we focus on the challenges in the field of lower limb robotics.
It aims at supporting people with lower limb disabilities by enhancing movement, mobility, and providing targeted exercise. Technologies such as exoskeletons, prosthetics, and rehabilitation robots are particularly helpful for those with neurological issues, offering improved rehabilitation, independence, and tailored care. Effective use requires precise control settings to adapt walking patterns to different terrains. Challenges in this field involve the extensive need for sensors for terrain detection and the complexity in processing sensor data. Simplifying sensor requirements to accurately determine terrain and slope is critical for user-friendly, efficient operation, and safety. The aim of our work is to recognize the terrain on which an exoskeleton is walking and its inclination. Among several state-of-the-art approaches, we achieved promising results using LSTM architectures with IMU data (an accuracy of 0.94 in leave-one-out cross-validation), and CNN-LSTM architectures with EMG data (an accuracy of 0.75). The fusion of IMU and EMG data did not bring any significant improvement, as explainability analyses indicated that the best 20 contributing features belong to IMU. Next, by varying the number of sensors, and therefore features, we noticed that the best results are achieved by selecting the most relevant features, from one to three, according to SHAP (on a 3-subject validation set), leading to accuracies of 0.85, 0.89 and 0.93, respectively. 
Lastly, we found that LSTM and CNN-LSTM are valid architectures for slope inclination prediction (MAE of 1.95°) and stair height estimation (MAE of 15.65 mm), without significant differences in employing 3 or 4 sensors.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Overview of the proposed Multi-Dataset Multi-Task model architecture, composed of a shared backbone 𝑓 𝑠 and two task-specific fully connected network heads, 𝑓 𝜏 1 and 𝑓 𝜏 2 , for tasks 𝜏 1 and 𝜏 2 , respectively, producing outputs 𝑂 𝜏 1 and 𝑂 𝜏 2 .</figDesc><graphic coords="3,89.29,254.05,204.32,69.50" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Overview of the proposed method for early stress detection consisting of two main blocks: physiological environment and DRL agent. The first involves the pre-processing of data and the description of the dynamic observation space. The second block incorporates a SAC-based DRL agent fed with data from the first block to control the system.</figDesc><graphic coords="3,299.03,114.05,113.87,66.42" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: Overall framework of the proposed method working with triplet networks.</figDesc><graphic coords="4,302.62,84.19,203.36,122.42" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: The MVSLAM pipeline integrates depth estimation, pose estimation, and 3D reconstruction modules to generate a continuously updated 3D map of the surgical environment from monocular endoscopic video frames.</figDesc><graphic coords="5,89.29,84.19,203.37,114.39" type="bitmap" /></figure>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>Fatih Aksu, Alice Natalina Caragliano, Camillo Maria Caruso, Omar Coser, Arianna Francesconi, Leonardo Furia, Guido Manni, Giustino Marino, Domenico Paolo and Filippo Ruffini are Ph.D. students enrolled in the National Ph.D. in Artificial Intelligence, course on Health and life sciences, organized by Università Campus Bio-Medico di Roma. We acknowledge financial support from: i) PNRR MUR project PE0000013-FAIR; ii) PRIN 2022 MUR 20228MZFAA-AIDA (CUP C53D23003620008); iii) PRIN PNRR 2022 MUR P2022P3CXJ-PICTURE (CUP C53D23009280001); iv) FCS MISE (CUP B89J23000580005); v) MAECI (grant n. CN23GR09); vi) NRR MUR project PNC0000007 Fit4MedRob. This work was also partially supported by the following companies: Eustema S.p.A. and ENAV S.p.A..</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Building an ai-enabled metaverse for intelligent healthcare: opportunities and challenges</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">M</forename><surname>Caruso</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rofena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Manni</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Aksu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Paolo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Iannello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Ital-IA 2023, Italia Intelligenza Artificiale Thematic Workshops, co-located with the 3rd CINI National Lab AIIS Conference on Artificial Intelligence (Ital IA 2023)</title>
				<meeting><address><addrLine>Pisa, Italy</addrLine></address></meeting>
		<imprint>
			<publisher>CEUR-WS</publisher>
			<date type="published" when="2023">May 29-30, 2023. 2023</date>
			<biblScope unit="page" from="134" to="139" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Making ai trustworthy in multimodal and healthcare scenarios</title>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Iannello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Ruffini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Ital-IA</title>
				<meeting>the Ital-IA</meeting>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Artificial intelligence and the future of web 3.0: Opportunities and challenges ahead</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">P</forename><surname>Bharadiya</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">American Journal of Computer Science and Technology</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page" from="91" to="96" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">A systematic literature review on the impact of artificial intelligence on workplace outcomes: A multiprocess perspective</title>
		<author>
			<persName><forename type="first">V</forename><surname>Pereira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Hadjielias</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Christofi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Vrontis</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Human Resource Management Review</title>
		<imprint>
			<biblScope unit="volume">33</biblScope>
			<biblScope unit="page">100857</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Multi-objective optimization determines when, which and how to fuse deep networks: An application to predict covid-19 outcomes</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computers in Biology and Medicine</title>
		<imprint>
			<biblScope unit="volume">154</biblScope>
			<biblScope unit="page">106625</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<monogr>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Albano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Faiella</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Fazzini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Santucci</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2212.14084</idno>
		<title level="m">Multimodal explainability via latent shift applied to covid-19 stratification</title>
				<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Named entity recognition in italian lung cancer clinical reports using transformers</title>
		<author>
			<persName><forename type="first">D</forename><surname>Paolo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</title>
				<imprint>
			<publisher>IEEE</publisher>
			<date type="published" when="2023">2023. 2023</date>
			<biblScope unit="page" from="4101" to="4107" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Radiopathomics: multimodal learning in non-small cell lung cancer for adaptive radiotherapy</title>
		<author>
			<persName><forename type="first">M</forename><surname>Tortora</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Access</title>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Assessing the impact of data-driven limitations on tracing and forecasting the outbreak dynamics of covid-19</title>
		<author>
			<persName><forename type="first">G</forename><surname>Fiscon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Salvadore</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">R</forename><surname>Garbuglia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Paci</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computers in biology and medicine</title>
		<imprint>
			<biblScope unit="volume">135</biblScope>
			<biblScope unit="page">104657</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">A multi-expert system to detect covid-19 cases in x-ray images</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">C</forename><surname>D'amico</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS), IEEE</title>
				<imprint>
			<date type="published" when="2021">2021. 2021</date>
			<biblScope unit="page" from="395" to="400" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Pareto optimization of deep networks for covid-19 diagnosis from chest x-rays</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">C</forename><surname>D'amico</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Pattern Recognition</title>
		<imprint>
			<biblScope unit="volume">121</biblScope>
			<biblScope unit="page">108242</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Optimized fusion of cnns to diagnose pulmonary diseases on chest x-rays</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">International Conference on Image Analysis and Processing</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2022">2022</date>
			<biblScope unit="page" from="197" to="209" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Exploring early stress detection from multimodal time series with deep reinforcement learning</title>
		<author>
			<persName><forename type="first">L</forename><surname>Furia</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</title>
				<imprint>
			<publisher>IEEE</publisher>
			<date type="published" when="2023">2023. 2023</date>
			<biblScope unit="page" from="1917" to="1920" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<monogr>
		<author>
			<persName><forename type="first">A</forename><surname>Rofena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sarli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">L</forename><surname>Piccolo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sammarra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">B</forename><surname>Zobel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2308.00471</idno>
		<title level="m">A deep learning approach for virtual contrast enhancement in contrast enhanced spectral mammography</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b14">
	<monogr>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">M</forename><surname>Caruso</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Ramella</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2307.11465</idno>
		<title level="m">A deep learning approach for overall survival analysis with missing values</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b15">
	<monogr>
		<ptr target="http://www.cosbi-lab.it/claro/" />
		<title level="m">CLARO -CoLlAborative multi-sources Radiopathomics approach for personalized Oncology in nonsmall cell lung cancer</title>
				<imprint>
			<date type="published" when="2020">2020. 2023-03-20</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Early experiences on using triplet networks for histological subtype classification in non-small cell lung cancer</title>
		<author>
			<persName><forename type="first">F</forename><surname>Aksu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IEEE 36th International Symposium on Computer-Based Medical Systems (CBMS), IEEE</title>
				<imprint>
			<date type="published" when="2023">2023. 2023</date>
			<biblScope unit="page" from="832" to="837" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Aiforcovid: Predicting the clinical outcomes in patients with covid-19 applying ai to chest-x-rays. an italian multicentre study</title>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">C</forename><surname>D'amico</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Tessadori</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Valbusa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Bortolotto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">U</forename><surname>Akbar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Fazzini</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Medical image analysis</title>
		<imprint>
			<biblScope unit="volume">74</biblScope>
			<biblScope unit="page">102216</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
