<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.0 20120330//EN" "JATS-archivearticle1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta />
    <article-meta>
      <title-group>
        <article-title>Towards AI-driven Next Generation Personalized Healthcare and Well-being</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author">
          <string-name>Fatih Aksu</string-name>
          <xref ref-type="aff" rid="aff1">1</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Alessandro Bria</string-name>
          <xref ref-type="aff" rid="aff3">3</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Alice Natalina Caragliano</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Camillo Maria Caruso</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Wenting Chen</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Ermanno Cordelli</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Omar Coser</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Arianna Francesconi</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Leonardo Furia</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Valerio Guarrasi</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Giulio Iannello</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Clemente Lauretti</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Guido Manni</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Giustino Marino</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Domenico Paolo</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Filippo Rufini</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Linlin Shen</string-name>
          <xref ref-type="aff" rid="aff4">4</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Rosa Sicilia</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Paolo Soda</string-name>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Christian Tamantini</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Matteo Tortora</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Zhuoru Wu</string-name>
          <xref ref-type="aff" rid="aff4">4</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Loredana Zollo</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
        </contrib>
        <aff id="aff0">
          <label>0</label>
          <institution>City University of Hong Kong</institution>
        </aff>
        <aff id="aff1">
          <label>1</label>
          <institution>Department of Biomedical Sciences, Humanitas University</institution>
          ,
          <addr-line>Milan</addr-line>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff2">
          <label>2</label>
          <institution>Department of Diagnostics and Intervention, Radiation Physics, Biomedical Engineering, Umeå University</institution>
          ,
          <country country="SE">Sweden</country>
        </aff>
        <aff id="aff3">
          <label>3</label>
          <institution>Department of Electrical and Information Engineering, University of Cassino and Southern Latium</institution>
          ,
          <addr-line>Cassino</addr-line>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff4">
          <label>4</label>
          <institution>Shenzhen University</institution>
        </aff>
        <aff id="aff5">
          <label>5</label>
          <institution>Unit of Advanced Robotics and Human-Centered Technologies, Department of Engineering, University Campus Bio-Medico of Rome</institution>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff6">
          <label>6</label>
          <institution>Unit of Computer Systems and Bioinformatics, Department of Engineering, University Campus Bio-Medico of Rome</institution>
          ,
          <country country="IT">Italy</country>
        </aff>
      </contrib-group>
      <abstract>
        <p>In the last few years Artificial Intelligence (AI) is emerging as a game changer in many areas of society and, in particular, its integration in medicine heralds a transformative approach towards personalized healthcare and well-being, promising significant improvements in diagnostic precision, therapeutic outcomes, and patient care. Our research explores the cutting-edge realms of multimodal AI, resilient AI, and healthcare robotics, aiming to harness the synergy of diverse data modalities and advanced computational models to redefine healthcare paradigms. This multidisciplinary effort seeks to bridge technology and clinical practice, advancing AI-driven next generation personalized healthcare and well-being.</p>
      </abstract>
      <kwd-group>
        <kwd>Artificial Intelligence</kwd>
        <kwd>Multimodal Learning</kwd>
        <kwd>Precision Medicine</kwd>
        <kwd>Stress Detection</kwd>
        <kwd>Resilient AI</kwd>
        <kwd>Healthcare Robotics</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec id="sec-1">
      <title>1. Introduction</title>
      <p>Artificial Intelligence (AI) has proven itself as an enabling factor for triggering great transformations of society [<xref ref-type="bibr" rid="ref1 ref2">1, 2, 3, 4</xref>]. However, on the verge of the fifth industrial revolution, there are several challenges that involve the consolidation of AI arrival in sectors as medicine and people well-being. Indeed, this paradigm shift towards AI-driven healthcare is not just a technological revolution; it represents a comprehensive reimagining of medical practices, enhancing the quality, efficiency, and accessibility of healthcare services. In this scenario our efforts are directed towards four research paths: (i) multimodal AI for precision medicine (section 2); (ii) multimodal AI to foster well-being (section 3); (iii) resilient AI (section 4); (iv) AI in robotics for healthcare (section 5). For each of these routes we provide a brief description of the developed solutions, highlighting solved problems and open challenges.</p>
    </sec>
    <sec id="sec-2">
      <title>2. Multimodal AI enables precision medicine</title>
      <p>The evolution of precision medicine marks a paradigm shift from the traditional "one-size-fits-all" approach in healthcare towards tailored therapeutic strategies that account for individual variability in genes, environment, and lifestyle. In this context, leveraging the variety of patient generated data (as images, clinical data, electronic health records etc.) can provide a significant boost to unlocking a holistic view of the patient. Towards this end multimodal AI provides the ultimate tool [5, 6]: the integration is not merely additive, it’s transformative, enabling the extraction of insights that would remain obscured under traditional, unimodal analysis. We are currently studying the potential of multimodal AI for precision medicine facing different challenges in different application domains: in the oncological domain we face challenges regarding data fusion and representation, with two projects on Non-Small Cell Lung Cancer (NSCLC) (sections 2.1 and 2.2); in augmenting the diagnosis and prognosis of Alzheimer (section 2.3) we tackle the problem of imbalance in multimodal datasets; and in COVID-19 prognosis (section 2.4) we attempt to deal with compatible tasks for training deep learning models without leading to overfitting.</p>
      <sec id="sec-2-1">
        <title>2.1. AIDA</title>
        <p>AIDA stands for "explAinable multImodal Deep learning for personAlized oncology". This project faces the challenge of advancing Multimodal Deep Learning (MDL), studying how to learn shared representations between different modalities, by investigating when to fuse the different modalities and how to embed in the training any process able to learn more powerful data representations. All this is directed towards facing the association between radiomic, pathomic and Electronic Health Records (EHRs) in precision oncology to predict the patient outcomes in terms of progression free survival, overall survival, relapse time and response rate in NSCLC, which represents the 85% of all lung cancer cases. To pursue these objectives we started from learning unimodal representations of EHRs and medical imaging.</p>
        <p>As a prior contribution, EHRs are vital resources for documenting patient clinical history and procedures, but are often challenging to process due to their unstructured nature. Natural Language Processing (NLP) tools, particularly Named Entity Recognition (NER) with the use of Transformer-based models, have proven effective in extracting meaningful information from EHRs [7]. Transformers excel at capturing contextual relationships between words, and the still not thoroughly explored contextual embedding they create can enhance the understanding of the content itself. We propose the Hierarchical Embedding Attention for overall survivaL (HEAL), a methodology that leverages multi-class NER-driven representations from EHRs by weighting them with attentional mechanisms. The ability of emphasizing clinically relevant information within unstructured data, operating both at word and sentence levels, makes HEAL more interpretable for medical applications. In a NSCLC Overall Survival (OS) prediction case study, HEAL achieved an average c-index of 0.639 and a low standard deviation of 0.014 over 5 runs, showing a statistically significant superiority with respect to manually extracted clinical features.</p>
        <p>Our second contribution, even if still at its preliminary steps, grounds on the fact that deep learning (DL) approaches have demonstrated significant value in automatically learning potentially relevant patterns from medical images, such as computed tomography (CT) [8]. Hence, in this study we explore a novel methodology for predicting OS in NSCLC patients using only CT images, aiming at a multitask architecture that encompasses prognostic factors like Progression-Free Survival (PFS) beyond predicting OS alone. The first steps in this direction include producing a soft attention weighted feature map for each input slice and highlighting the relevant slices crucial for predicting OS outcome.</p>
      </sec>
      <sec id="sec-2-1-1">
        <title>2.2. PICTURE</title>
        <p>PICTURE stands for "Pathological response AI-driven prediCTion after neoadjUvant theRapiEs in NSCLC". This project is based on the central hypothesis that heterogeneous medical data (i.e. radiological images, histology images, cytology and molecular data and EHRs) are consistent with the pathological complete response (pCR), so their combination using artificial intelligence (AI) can provide accurate pCR prediction in NSCLC patients. Indeed, albeit treating locally advanced NSCLC surgically is the mainstay, it is important to prevent post-surgery recurrence, and neoadjuvant therapy (NAT) has shown potential in enhancing overall survival rates and achieving a complete pathological response, that, if correctly evaluated before the treatment, can even avoid non necessary surgical resections.</p>
        <p>PICTURE pursues three objectives: (i) pCR prediction through radiology imaging, histology, cytology, molecular data, EHRs, and their combination; (ii) leveraging multimodal deep learning to make the performance of AI resilient and robust for pCR prediction signature; (iii) improving trust and transparency using explainable AI models. PICTURE also has the exploratory aim of transferring trained models to predict pCR for patients undergoing chemoimmunotherapy, tailoring treatments to the individual needs of patients.</p>
      </sec>
      <sec id="sec-2-1-2">
        <title>2.3. Facing imbalance in Alzheimer’s Disease diagnosis and prognosis</title>
        <p>Alzheimer’s disease (AD) is a progressive neurodegenerative condition with decline in cognitive function, and because of the lack of a cure, its early detection is paramount. Despite the recent progress in AI, challenges such as class imbalance, integration of multimodal data, and robust generalization remain pervasive. In response to this we introduce a novel methodology that leverages the strengths of ensemble learning while incorporating advanced fusion techniques. For each of the 4 modalities of the tabular ADNI database, we train a series of classifiers on varied class distributions followed by a late fusion strategy that integrates the different modalities to improve the results.</p>
        <p>Our framework is evaluated on two diagnostic tasks (binary and ternary) and four binary prognostic tasks (at 12, 24, 36, and 48 months) and compared with 12 state-of-the-art imbalanced data algorithms, achieving 97.04% g-mean on the binary diagnostic task and 90.81% g-mean on the 48-month prognostic task.</p>
      </sec>
      <sec id="sec-2-2">
        <title>2.4. Multi-Dataset Multi-Task Learning for COVID-19 Prognosis</title>
        <p>In COVID-19 context [9] in order to fight the scarcity of large, labelled chest radiographic images (CXR) datasets, we introduce a novel multi-dataset multi-task (MDMT) training framework, by integrating correlated datasets from disparate sources and assessing severity score to classify prognostic severity groups [10, 11, 12], instead of relying on datasets with multiple and correlated labelling schemes. As illustrated in Figure 1, a deep CNN takes the images as input and branches into task-specific fully connected output networks, to end with a multi-task loss function incorporating an indicator function to exploit multi-dataset integration.</p>
        <p>Figure 1: Overview of the proposed Multi-Dataset Multi-Task model architecture, composed by a shared backbone and two task-specific fully connected network heads for tasks 1 and 2, respectively, producing the corresponding outputs.</p>
        <p>Proceeding with a 5-fold cross-validation and leave-one-center-out training, we evaluated the method across 18 different CNN backbones on the prognosis classification task and fine-tuning from the BRIXIA dataset to the AIforCOVID dataset task. Best average performance with statistical robustness achieved: 68.6% accuracy, 66.6% F1-score and 68.5% g-mean for the 5-fold cross validation, and 65.7% accuracy, 64.3% F1-score and 66.0% g-mean for the leave-one-center-out validation strategy. Future directions include new domains and the integration of XAI [6].</p>
      </sec>
    </sec>
    <sec id="sec-wellbeing">
      <title>3. Multimodal AI to foster well-being</title>
      <p>Stress, a response to physical and emotional demands, is crucial in determining individuals’ well-being and if unmanaged can lead to conditions such as anxiety, depression and cardiovascular diseases. Also in this scenario multimodal AI offers a tool for a proactive approach to health management, in order to provide real-time monitoring and interventions, thereby mitigating long-term health risks associated with chronic stress.</p>
      <p>We are targeting stress detection from two perspectives: first, we are focusing on maximising the stress level prediction accuracy within the shortest possible time, exploiting multimodal physiological time series data and Deep Reinforcement Learning (DRL); second, we are further expanding the multimodal view integrating information from video, audio and text with the physiological data. Robust and fast stress detection approaches can bring benefit in several contexts: from providing a targeted and more personal assistance to patients, to ensuring safety for workers, for instance, Air Traffic Controllers (ATC) that endure high levels of psychological pressure during their job impacting operational safety.</p>
      <p>Our first approach [13] employs a new DRL model to identify stress indicators. We obtained this by leveraging a dynamic time observation window that expands each step of the learning process, asking the agent to choose either to continue observing or to classify based on the information gathered until that point, trying to minimize the amount of data required for decision-making. As depicted in Figure 2, we adopted the Soft Actor-Critic algorithm for its effectiveness in handling continuous action spaces. In a Leave-One-Subject-Out approach with data augmentation on the EEG public dataset we outperformed existing solutions, showing the power of DRL for early stress detection.</p>
      <p>(Figure 2 diagram elements: estimated discounted reward; TD error; reward function R; threshold condition Δ &gt; τ with true/false branches; psycho-physical state.)</p>
      <p>On top of this approach we are exploring the larger multimodal asset of the Ulm-Trier Social Stress Test dataset (ULM-TSST, MuSe 2022 challenge), containing 41 training, 14 validation and 14 test subjects, simulating a job interview scenario, with audio, video, text, and physiological data modalities, rated on arousal and valence parameters. The aim is to build a high performance architecture that leverages non invasive modalities for stress detection that can be employed in work environments. In both cases the scarcity of large datasets that provide a quantitative measurement of stress is still the main challenge: we will try to face it considering the construction of robust and specific acquisition protocols to test the effectiveness of the developed approaches.</p>
    </sec>
    <sec id="sec-3">
      <title>4. Resilient AI</title>
      <p>Due to the high stakes involved in healthcare decisions, the sensitivity of medical data, and the complexity of medical environments, AI systems should be designed to maintain their intended performance and integrity in the face of adversities, as data corruption, missing data, privacy leakage, or unexpected changes in their operating environment. This is the goal of Resilient AI, which is an aspect that cannot be left out when the aim is to integrate AI for augmenting the medical practice.</p>
      <p>To meet this goal we are currently investigating three main aspects that fall under the Resilient AI umbrella: developing systems robust to missing data, the challenge of limited extension datasets and how to protect sensitive patients data.</p>
      <p>With respect to the missing data challenge [14], although a variety of strategies exist for addressing this problem in health datasets, to overcome the obstacle to select the most suitable one and their dependency on the dataset’s specifics, we developed a Transformer-based model [15] that applies masking to ignore the missing data, thus eliminating the need of imputation and deletion techniques and focusing directly only on the available features through self-attention. Moreover, we introduced a novel feature-identifying form of positional encoding to facilitate the integration of tabular data into a Transformer framework. This method was validated through an overall survival classification task, employing clinical data from the CLARO [16] project and improving the prediction accuracy.</p>
      <p>In order to address the problem of working with datasets limited in the extension, particularly frequent in the healthcare domain, Triplet networks, a subtype of the Siamese networks, emerge as a promising solution, comprising three identical networks operating concurrently. Throughout training of these three networks two inputs belong to the same class, whereas the third belongs to a distinct class, with the final objective to develop a feature space with two distinct clusters, one per class, by incorporating inter-class diversities alongside intra-class similarities and providing scenarios with limited data with more triplets compared to instances (Figure 3). In our study [17], using a private dataset of 86 CT scans, triplet networks surpass the plain deep networks in accurately predicting the histological subtypes of NSCLC patients. Currently, we are broadening the scope of our research including PET images alongside CT scans and adopting a multimodal strategy for the same classification. By integrating these complementary data we anticipate achieving a significant improvement and overcoming the challenges posed by limited data scenarios.</p>
      <p>Figure 3: Overall framework of the proposed method working with triplet networks.</p>
      <p>Last but not least, the challenge related to patient privacy led us to explore Federated learning (FL). FL presents an innovative solution to the challenge of protecting sensitive patient data in artificial intelligence applications in healthcare, in fact enabling the training of a shared global model with a central server while ensuring data privacy within local institutions. On this basis we introduce a new token-based FL paradigm, revolutionizing the traditional approach with sequential or random passing of a token between clients during each epoch. This innovative method allows only the token owner to send the weights to the server, which redistributes them directly to all models. By eliminating local training epochs and allowing immediate transmission, this paradigm shift streamlines the process by circulating a single model among clients and also mitigating the need for an initial warm-up period, potentially paving the way for a decentralized system that reduces dependence on a central server and minimizes the number of parameters transmitted in each iteration. Results on the tabular part of the AIforCOVID dataset [18], composed of 6 hospitals, show that the performance of the FL model does not deviate from that of its equivalent trained on all datasets aggregated into a single pool. The next steps will focus on integrating other modalities into the FL pipeline, such as CXR scans of the AIforCOVID dataset itself.</p>
    </sec>
    <sec id="sec-robotics">
      <title>5. AI for healthcare robotics</title>
      <p>The integration of robotics in healthcare settings exemplifies another dimension of AI’s impact, automating routine tasks, assisting in surgeries with precision beyond human capability, and providing rehabilitation support to patients. This not only enhances service delivery but also alleviates the workload on healthcare professionals, allowing them to focus more on patient-centered care. In this scenario we pursue two aims: first, enhancing robotic surgery with real-time high precision localization; second, boosting lower-limb robotic rehabilitation optimizing the structural exoskeleton sensor configuration.</p>
      <p>For the first objective we focus on the laparoscopy use case, as one of the preferred surgical methods. Despite recent advancements in image acquisition it is still limited to rely on 2D images view: misinterpreting anatomical structures due to this limit is a common source of errors. In contrast, 3D imaging increases the accuracy of instrument manipulation, leads to better outcomes in surgery, and shortens the learning curve for trainees. Even though several research directions in surgical 3D imaging have been explored, like camera-based tracking and mapping, Mosaicking, Structure from Motion, and Shape from Template, they often rely on simplifications that can limit their effectiveness. On this ground Simultaneous Localization and Mapping (SLAM) has shown promising results, as it aims to create a map of the environment while localizing the sensor position within it. Therefore we developed a robust deep learning SLAM pipeline to operate in real-time across diverse surgical settings by providing an immersive, interactive 3D environment (Figure 4), allowing for more precise and personalized interventions with the future possibility to be integrated with augmented reality displays.</p>
      <p>For the second objective, we focus on the challenges in the field of lower limb robotics. It aims at supporting people with lower limb disabilities by enhancing movement, mobility, and providing targeted exercise. Technologies as exoskeletons, prosthetics, and rehabilitation robots are particularly helpful for those with neurological issues, offering improved rehabilitation, independence, and tailored care. Effective use requires precise control settings to adapt walking patterns to different terrains. Challenges in this field involve the extensive need for sensors for terrain detection and the complexity in processing sensor data. Simplifying sensor requirements to accurately determine terrain and slope is critical for user-friendly, efficient operation, and safety. The aim of our work is to recognize the terrain on which an exoskeleton is walking and its inclination. Among several state-of-the-art driven approaches, we achieved promising results using LSTM architectures with IMU data (0.94 of accuracy in Leave-one-out cross-validation), and CNN-LSTM architectures with EMG data (0.75 of accuracy). The fusion of IMU and EMG data didn’t bring any significant improvement, as explanatory tests indicated that the best 20 contributing features belong to IMU. Next, by varying the number of sensors, and therefore features, we noticed that the best results are achieved by selecting the most relevant features, from one to three, according to SHAP (on a 3 subjects validation set), leading to 0.85, 0.89 and 0.93 of accuracy respectively. Lastly, we found that LSTM and CNN-LSTM are valid architectures for slope inclination prediction (MAE of 1.95°) and stair height (MAE of 15.65 mm), without significant differences in employing 3 or 4 sensors.</p>
    </sec>
    <sec id="sec-4">
      <title>Acknowledgments</title>
      <p>Fatih Aksu, Alice Natalina Caragliano, Camillo Maria Caruso, Omar Coser, Arianna Francesconi, Leonardo Furia, Guido Manni, Giustino Marino, Domenico Paolo and Filippo Rufini are Ph.D. students enrolled in the National Ph.D. in Artificial Intelligence, course on Health and life sciences, organized by Università Campus Bio-Medico di Roma. We acknowledge financial support from: i) PNRR MUR project PE0000013-FAIR; ii) PRIN 2022 MUR 20228MZFAA AIDA (CUP C53D23003620008); iii) PRIN PNRR 2022 MUR P2022P3CXJ-PICTURE (CUP C53D23009280001); iv) FCS MISE (CUP B89J23000580005); v) MAECI (grant n. CN23GR09); vi) PNRR MUR project PNC0000007 Fit4MedRob. This work was also partially supported by the following companies: Eustema S.p.A. and ENAV S.p.A.</p>
      <p>[3] J. P. Bharadiya, Artificial intelligence and the future of web 3.0: Opportunities and challenges ahead, American Journal of Computer Science and Technology 6 (2023) 91–96.</p>
      <p>[4] V. Pereira, E. Hadjielias, M. Christofi, D. Vrontis, A systematic literature review on the impact of artificial intelligence on workplace outcomes: A multi-process perspective, Human Resource Management Review 33 (2023) 100857.</p>
      <p>[5] V. Guarrasi, P. Soda, Multi-objective optimization determines when, which and how to fuse deep networks: An application to predict covid-19 outcomes, Computers in Biology and Medicine 154 (2023) 106625.</p>
      <p>[6] V. Guarrasi, L. Tronchin, D. Albano, E. Faiella, D. Fazzini, D. Santucci, P. Soda, Multimodal explainability via latent shift applied to covid-19 stratification, arXiv preprint arXiv:2212.14084 (2022).</p>
      <p>[7] D. Paolo, et al., Named entity recognition in italian lung cancer clinical reports using transformers, in: 2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM), IEEE, 2023, pp. 4101–4107.</p>
      <p>[8] M. Tortora, et al., Radiopathomics: multimodal learning in non-small cell lung cancer for adaptive radiotherapy, IEEE Access (2023).</p>
      <p>[9] G. Fiscon, F. Salvadore, V. Guarrasi, A. R. Garbuglia, P. Paci, Assessing the impact of data-driven limitations on tracing and forecasting the outbreak dynamics of covid-19, Computers in biology and medicine 135 (2021) 104657.</p>
      <p>[10] V. Guarrasi, N. C. D’Amico, R. Sicilia, E. Cordelli, P. Soda, A multi-expert system to detect covid-19 cases in x-ray images, in: 2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS), IEEE, 2021, pp. 395–400.</p>
      <p>[11] V. Guarrasi, N. C. D’Amico, R. Sicilia, E. Cordelli, P. Soda, Pareto optimization of deep networks for covid-19 diagnosis from chest x-rays, Pattern Recognition 121 (2022) 108242.</p>
      <p>[12] V. Guarrasi, P. Soda, Optimized fusion of cnns to diagnose pulmonary diseases on chest x-rays, in: International Conference on Image Analysis and Processing, Springer, 2022, pp. 197–209.</p>
      <p>[13] L. Furia, et al., Exploring early stress detection from multimodal time series with deep reinforcement learning, in: 2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM), IEEE, 2023, pp. 1917–1920.</p>
      <p>[14] A. Rofena, V. Guarrasi, M. Sarli, C. L. Piccolo, M. Sammarra, B. B. Zobel, P. Soda, A deep learning approach for virtual contrast enhancement in contrast enhanced spectral mammography, arXiv preprint arXiv:2308.00471 (2023).</p>
      <p>[15] C. M. Caruso, V. Guarrasi, S. Ramella, P. Soda, A deep learning approach for overall survival analysis with missing values, arXiv preprint arXiv:2307.11465 (2023).</p>
      <p>[16] CLARO - CoLlAborative multi-sources Radiopathomics approach for personalized Oncology in non-small cell lung cancer, http://www.cosbi-lab.it/claro/, 2020. Accessed: 2023-03-20.</p>
      <p>[17] F. Aksu, et al., Early experiences on using triplet networks for histological subtype classification in non-small cell lung cancer, in: 2023 IEEE 36th International Symposium on Computer-Based Medical Systems (CBMS), IEEE, 2023, pp. 832–837.</p>
      <p>[18] P. Soda, N. C. D’Amico, J. Tessadori, G. Valbusa, V. Guarrasi, C. Bortolotto, M. U. Akbar, R. Sicilia, E. Cordelli, D. Fazzini, et al., Aiforcovid: Predicting the clinical outcomes in patients with covid-19 applying ai to chest-x-rays. An italian multicentre study, Medical image analysis 74 (2021) 102216.</p>
    </sec>
  </body>
  <back>
    <ref-list>
      <ref id="ref1">
        <mixed-citation>
          [1]
          <string-name>
            <given-names>V.</given-names>
            <surname>Guarrasi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>L.</given-names>
            <surname>Tronchin</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C. M.</given-names>
            <surname>Caruso</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Rofena</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Manni</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Aksu</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Paolo</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Iannello</surname>
          </string-name>
          ,
          <string-name>
            <given-names>R.</given-names>
            <surname>Sicilia</surname>
          </string-name>
          ,
          <string-name>
            <given-names>E.</given-names>
            <surname>Cordelli</surname>
          </string-name>
          , et al.,
          <article-title>Building an ai-enabled metaverse for intelligent healthcare: opportunities and challenges</article-title>
          ,
          <source>in: Ital-IA</source>
          <year>2023</year>
          ,
          <article-title>Italia Intelligenza Artificiale Thematic Workshops, co-located with the 3rd</article-title>
          <source>CINI National Lab AIIS Conference on Artificial Intelligence (Ital IA</source>
          <year>2023</year>
          ), Pisa, Italy, May
          <volume>29</volume>
          -30,
          <year>2023</year>
          , CEUR-WS,
          <year>2023</year>
          , pp.
          <fpage>134</fpage>
          -
          <lpage>139</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref2">
        <mixed-citation>
          [2]
          <string-name>
            <given-names>E.</given-names>
            <surname>Cordelli</surname>
          </string-name>
          ,
          <string-name>
            <given-names>V.</given-names>
            <surname>Guarrasi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Iannello</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Rufini</surname>
          </string-name>
          ,
          <string-name>
            <given-names>R.</given-names>
            <surname>Sicilia</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Soda</surname>
          </string-name>
          ,
          <string-name>
            <given-names>L.</given-names>
            <surname>Tronchin</surname>
          </string-name>
          ,
          <article-title>Making ai trustworthy in multimodal and healthcare scenarios</article-title>
          ,
          <source>Proceedings of the Ital-IA</source>
          (
          <year>2023</year>
          ).
        </mixed-citation>
      </ref>
    </ref-list>
  </back>
</article>