<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Virtual Scanner: Leveraging Resilient Generative AI for Radiological Imaging in the Era of Medical Digital Twins</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Carolina</forename><surname>Adornato</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Cecilia</forename><surname>Assolito</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ermanno</forename><surname>Cordelli</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Francesco</forename><forename type="middle">Di</forename><surname>Feola</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="department" key="dep1">Department of Diagnostics and Intervention, Radiation Physics</orgName>
								<orgName type="department" key="dep2">Biomedical Engineering</orgName>
								<orgName type="institution">Umeå University</orgName>
								<address>
									<postCode>90187</postCode>
									<settlement>Umeå</settlement>
									<country key="SE">Sweden</country>
								</address>
							</affiliation>
						</author>
						<author role="corresp">
							<persName><forename type="first">Valerio</forename><surname>Guarrasi</surname></persName>
							<email>valerio.guarrasi@unicampus.it</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Giulio</forename><surname>Iannello</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Lorenzo</forename><surname>Marcoccia</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Elena</forename><forename type="middle">Mulero</forename><surname>Ayllon</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Rebecca</forename><surname>Restivo</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Aurora</forename><surname>Rofena</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Rosa</forename><surname>Sicilia</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Paolo</forename><surname>Soda</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="department" key="dep1">Department of Diagnostics and Intervention, Radiation Physics</orgName>
								<orgName type="department" key="dep2">Biomedical Engineering</orgName>
								<orgName type="institution">Umeå University</orgName>
								<address>
									<postCode>90187</postCode>
									<settlement>Umeå</settlement>
									<country key="SE">Sweden</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Matteo</forename><surname>Tortora</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Lorenzo</forename><surname>Tronchin</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Engineering</orgName>
								<orgName type="laboratory">Research Unit of Computer Systems and Bioinformatics</orgName>
								<orgName type="institution">Università Campus Bio-Medico di Roma</orgName>
								<address>
									<addrLine>Via Àlvaro del Portillo 21</addrLine>
									<postCode>00128</postCode>
									<settlement>Rome</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="department" key="dep1">Department of Diagnostics and Intervention, Radiation Physics</orgName>
								<orgName type="department" key="dep2">Biomedical Engineering</orgName>
								<orgName type="institution">Umeå University</orgName>
								<address>
									<postCode>90187</postCode>
									<settlement>Umeå</settlement>
									<country key="SE">Sweden</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Virtual Scanner: Leveraging Resilient Generative AI for Radiological Imaging in the Era of Medical Digital Twins</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">E62721C4EA59AB8F1A596BBB2AD227E9</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T16:54+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Medical Imaging</term>
					<term>Generative Artificial Intelligence</term>
					<term>Virtual Scanner</term>
					<term>Resilient AI</term>
					<term>Multimodal Learning</term>
					<term>Radiology</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Advancements in generative artificial intelligence (AI) are setting the stage for transformative changes in medical imaging, particularly through the development of the Virtual Scanner. This innovative approach leverages resilient generative AI to synthesize radiological images, addressing critical challenges in the field such as data scarcity, patient exposure to radiation, and the limitations of current imaging technologies. By harnessing the power of Generative Adversarial Networks (GANs) and focusing on the resilience of these algorithms, the Virtual Scanner aims to enhance diagnostic accuracy, improve patient care, and fill gaps in multimodal datasets. Our research explores both unimodal and multimodal techniques, including GAN ensembles, latent augmentation, and advanced texture synthesis, to create robust and adaptable generative models. Through extensive experimentation and analysis, we demonstrate the potential of the Virtual Scanner to revolutionize medical diagnostics by providing a safer, more efficient, and comprehensive imaging solution. The implications of this work extend beyond immediate medical applications, offering insights into the development of AI technologies capable of navigating the complexities of real-world data.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>In recent years, the intersection of artificial intelligence (AI) and healthcare has opened up novel possibilities for enhancing diagnostic accuracy, optimizing patient care, and tailoring treatment plans towards precision medicine. One of the most promising developments in this domain is the concept of the Medical Digital Twin, a virtual representation of a patient's health status, enabling personalized medical interventions and predictive healthcare analytics. Central to the utility and effectiveness of Medical Digital Twins is the capability for detailed and accurate radiological imaging, which provides a window into the internal workings of the human body without invasive procedures.</p><p>Radiological imaging, encompassing a range of modalities such as X-rays, MRI, and CT scans, plays a pivotal role in the diagnosis, monitoring, and treatment planning for a myriad of health conditions. However, the acquisition of these images often requires patients to undergo multiple scans, exposing them to potential risks associ-ated with radiation and contrast agents. Furthermore, the reliance on comprehensive multimodal imaging data presents challenges in scenarios where certain modalities are unavailable or unsuitable for some patients, leading to gaps in the data that can hinder diagnostic processes and the development of AI models in healthcare <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b1">2,</ref><ref type="bibr" target="#b2">3]</ref>.</p><p>The advancement of generative AI, particularly through the deployment of Generative Adversarial Networks (GANs), offers a novel solution to these challenges. 
By enabling the virtual generation of radiological images where real ones are unavailable or undesirable, AI not only mitigates the risks to patients but also bridges the data gaps in multimodal learning applications <ref type="bibr" target="#b3">[4,</ref><ref type="bibr" target="#b4">5,</ref><ref type="bibr" target="#b5">6]</ref>. We introduce the concept of the Virtual Scanner as a cornerstone of the Medical Digital Twin paradigm, aiming to revolutionize the field of radiology by synthesizing high-fidelity, modality-specific images through the power of AI, thus enhancing patient care and supporting radiologists in delivering more accurate diagnoses.</p><p>The scarcity of comprehensive radiological images presents significant challenges in medical diagnostics, affecting the efficacy of diagnostic processes and the development of AI tools. This scarcity arises from limited access to advanced imaging technologies, concerns over radiation exposure, and the difficulty of compiling diverse, multimodal datasets. Such challenges hinder the creation of effective AI models for diagnostics, impacting their accuracy and real-world applicability. The Virtual Scanner, leveraging resilient generative AI algorithms, addresses these issues by synthesizing radiological images to fill dataset gaps and reduce the need for repeated scans. Resilient generative AI refers to the development of models that not only excel in their designated tasks under ideal conditions but also maintain their performance when confronted with data that deviate from the norm, known as "data in the wild". 
Such resilience is crucial in ensuring that the AI tools developed for medical imaging are robust against the variations inherent in patient data across different demographics, equipment used, and pathological conditions.</p><p>By embedding resilience at the core of our generative AI algorithms, we aim to create a foundation for the Virtual Scanner that is not only technologically advanced but also reliable and effective across the spectrum of medical imaging needs. This approach positions our work not just as a technical achievement but as a meaningful contribution to the field of radiology, where the capacity to handle data in the wild can significantly enhance diagnostic processes and patient care.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Research Activities</head><p>Embarking on the journey to realize the Virtual Scanner, our investigation delves into a series of research activities, as shown in Figure <ref type="figure" target="#fig_11">1</ref>, each designed to push the boundaries of what's possible with generative AI in the field of radiology <ref type="bibr" target="#b6">[7,</ref><ref type="bibr" target="#b7">8]</ref>. These activities are categorized into two main areas: "Resilient Generative AI" and "Virtual Scanner Applications", enveloping a diverse array of methodologies and applications aimed at enhancing the generation and translation of medical imaging data. By addressing many aspects of generative AI, from improving algorithm resilience to creating virtual modalities, these activities underscore our commitment to advancing diagnostic capabilities and patient care through technological innovation.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.">Resilient Generative AI</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.1.">GAN Ensemble</head><p>In tackling the complexities of synthetic data generation within medical imaging, our research delves into optimizing generative AI through the use of GAN ensembles. This strategy is born from the necessity to overcome inherent limitations in single-model GAN applications, such as mode collapse and the inadequate representation of real data distributions, a common obstacle in generating high-quality and diverse medical images. The core of our approach lies in creating an ensemble of GANs that jointly optimizes the visual quality and diversity of synthetic images from a set of GANs. We aim to solve a Pareto multi-objective optimization problem that simultaneously covers the real training set, aiming to generate high-quality GANs and using as few GANs as possible.</p><p>We tested out methodology across three distinct medical datasets, employing 22 GANs with differing architectures, loss functions, and regularization techniques. Moreover, we uniformly sampled each model every 20000 training iterations, i.e., resulting in a total search space of 110 models. The experiments showcase that using synthetic datasets generated from such an ensemble improves the performances in classification downstream tasks compared to single GANs and Naive selection approaches, i.e., using all available 110 GANs or randomly selecting a subset.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.2.">LatentAugment</head><p>Data Augmentation (DA) is a crucial strategy in AI to enhance the volume and diversity of training datasets, thereby mitigating the risk of overfitting and bolstering model generalization to unseen data. Standard DA methods in image recognition tasks transform the images via geometric rigid and non-rigid transformations using image processing primitives, such as translation, rotation, cropping, etc. However, such transformations rely on human experts with prior knowledge of the dataset and fail to generate sufficiently diverse synthetic data. GANs offer a valuable addition to the available augmentation techniques. However, GANs generate high-quality samples rapidly, but they suffer from poor mode coverage, i.e., the variation and variety of the samples that can be generated, limiting their utility for DA purposes in the medical field.</p><p>We propose LatentAugment <ref type="bibr" target="#b8">[9]</ref>, a DA strategy that overcomes the low diversity of GANs, opening up for use in DA applications. LatentAugment addresses the threefold challenge of producing synthetic samples that are not only of high fidelity and quality but also diverse and rapidly generated. LatentAugment modifies the latent vectors of the real training set, moving them towards regions that maximize their diversity and fidelity. We applied LatentAugment to improve the performances of a downstream model performing of MRI-to-CT image translation. The results showed LatentAugment's superiority over common DA methods and naive GANsampling, i.e., creating data sampling from the GAN's latent space without any control.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.3.">Paired vs. Unpaired Image Translation</head><p>Image-to-Image translation in medical imaging presents a critical challenge due to the predominance of unpaired datasets, where the direct correspondence between source and target images is not established <ref type="bibr" target="#b9">[10]</ref>. While paired methods, e.g., Pix2Pix, rely on direct map-  </p><formula xml:id="formula_0">x x w0 = w⇤ w1 w2 W • • • wK = w Inversion Reconstruction wk G xk D Lf (wk) N ↵f k•k2 2 Llat(wk) N ↵lat W⇤ k•k2 2 Lpix(wk) N ↵pix X k•k2 2 Lperc(wk) N ↵perc (X ) L L(wk) update w N multiplication L addition k•k l2-distance D trained discriminator trained feature extractor G trained generator ! !! !! !|"| ⋯ " GAN Training !! !! " ! !! !! !! !|"| Generation !! !! !|"| ⋯ ⋯ Optimisation ⋯ !! " ! !|"| !! " ! !! !|"| !! !! !! " ! !|"| ⋯ ⋮ !! ⋮ Ensembles Multi-objective ! * !! !|"| " * !! &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " i e U b Z V W + m q a M L H W R Q a o C g C g G 1 j k = " &gt; A A A C A n i c b V D L S s N A F J 3 U V 6 2 v q C t x M 9 i K d V O S g o 9 l 0 Y U u K 9 g H N K F M J p N 2 6 C Q T Z i Z C C c W N v + L G h S J u / Q p 3 / o 2 T N g t t P T D M 4 Z x 7 u f c e L 2 Z U K s v 6 N g p L y y u r a 8 X 1 0 s b m 1 v a O u b v X l j w R m L Q w Z 1 x 0 P S Q J o x F p K a o Y 6 c a C o N B j p O O N r j O / 8 0 C E p D y 6 V + O Y u C E a R D S g G C k t 9 c 2 D i u N x 5 s t x q L / U 8 Q l T a F K 9 O T m t 9 M 2 y V b O m g I v E z k k Z 5 G j 2 z S / H 5 z g J S a Q w Q 1 L 2 b C t W b o q E o p i R S c l J J I k R H q E B 6 W k a o Z B I N 5 2 e M I H H W v F h w I V + k Y J T 9 X d H i k K Z L a k r Q 6 S G c t 7 L x P + 8 X q K C S z e l U Z w o E u H Z o C B h U H G Y 5 Q F 9 K g h W b K w J w o L q X S E e I o G w 0 q m V d A j 2 / M m L p F 2 v 2 e e 1 s 7 t 6 u X G V x 1 E E h + A I V I E N L k A D 3 I I m a A E M H s E z e A V v</formula><formula xml:id="formula_1">Y = " &gt; A A A C A n i c b V D L S 
s N A F J 3 U V 6 2 v q C t x M 9 i K d V O S g o 9 l U U G X F e w D m l A m k 0 k 7 d J I J M x O h h O L G X 3 H j Q h G 3 f o U 7 / 8 Z J m 4 V W D w x z O O d e 7 r 3 H i x m V y r K + j M L C 4 t L y S n G 1 t L a + s b l l b u + 0 J U 8 E J i 3 M G R d d D 0 n C a E R a i i p G u r E g K P Q Y 6 X i j y 8 z v 3 B M h K Y / u 1 D g m b o g G E Q 0 o R k p L f X O v 4 n i c + X I c 6 i 9 1 r g h T a F K 9 P j q u 9 M 2 y V b O m g H + J n Z M y y N H s m 5 + O z 3 E S k k h h h q T s 2 V a s 3 B Q J R T E j k 5 K T S B I j P E I D 0 t M 0 Q i G R b j o 9 Y Q I P t e L D g A v 9 I g W n 6 s + O F I U y W 1 J X h k g N 5 b y X i f 9 5 v U Q F 5 2 5 K o z h R J M K z Q U H C o O I w y w P 6 V B C s 2 F g T h A X V u 0 I 8 R A J h p V M</formula><formula xml:id="formula_2">I h x w I h x w p x v p x v || ⋅ ||1 p x v AM max • ( ) mean • ( ) || • ||F demux mux K Q V A * 𝛾 (a) (c) Gd ,𝜃 1 1</formula><p>Gd ,𝜃 </p><formula xml:id="formula_3">I h x w p x v p x v || ⋅ ||1 p x v AM max • ( ) mean • ( ) || • ||F demux mux K Q V A * 𝛾 (a) (c) Gd ,𝜃 1 1</formula><p>Gd ,𝜃 </p><formula xml:id="formula_4">I h x w I h x w p x v p x v || ⋅ ||1 p x v AM max • ( ) mean • ( ) || • ||F demux mux K Q V A * 𝛾 (a) (c) Gd ,𝜃 1 1 Gd ,𝜃 2 2</formula><p>Gd ,𝜃</p><formula xml:id="formula_5">p v n x n n x n . . . n x n h h h c MSTE MSTE (b) SA MSTE SA MSTLF sum 𝜉 𝛥H H H 𝛥H 𝛥H' I I H H or or I h x w I h x w p x v p x v || ⋅ ||1 p x v AM max • ( ) mean • ( ) || • ||F demux mux K Q V A * 𝛾 (a) (c) Gd ,𝜃 1 1</formula><p>Gd ,𝜃 </p><formula xml:id="formula_6">I h x w I h x w p x v p x v || ⋅ ||1 p x v AM max • ( ) mean • ( ) || • ||F demux mux K Q V A * 𝛾 (a) (c) Gd ,𝜃 1 1 Gd ,𝜃 2 2</formula><p>Gd ,𝜃</p><formula xml:id="formula_7">p v n x n n x n . . . 
n x n h h h c MSTE MSTE (b) SA MSTE SA MSTLF sum 𝜉 𝛥H H H 𝛥H 𝛥H' I I H H or or I h x w I h x w p x v p x v || ⋅ ||1 p x v AM max • ( ) mean • ( ) || • ||F demux mux K Q V A * 𝛾 (a) (c) Gd ,𝜃 1 1</formula><p>Gd ,𝜃 </p><formula xml:id="formula_8">I h x w I h x w p x v p x v || ⋅ ||1 p x v Gd ,𝜃 1 1 Gd ,𝜃 2 2</formula><p>Gd ,𝜃</p><formula xml:id="formula_9">p v n x n n x n . . . n x n h h h c MSTE MSTE (b) MSTE 𝛥H H H 𝛥H I I H H or or I h x w I h x w p x v p x v || ⋅ ||1 p x v Gd ,𝜃 1 1</formula><p>Gd ,𝜃      Our work introduces a novel approach to narrow the performance drop between paired and unpaired image translation methodologies. By integrating a novel paired virtual loss function into the unpaired CycleGAN framework, we enhance the stability and accuracy of unpaired image translation without necessitating direct image pairs. This innovation finds practical application in Low-Dose Computed Tomography (LDCT) denoising, a process aimed at reducing radiation exposure while maintaining image quality. 
Through this approach, we leverage the abundance of unpaired LDCT and Full-Dose CT (FDCT) images to validate our model, demonstrating its effectiveness in producing high-quality, denoised images that closely approximate FDCT standards, thereby mitigating health risks associated with radiation without compromising diagnostic integrity.</p><formula xml:id="formula_10">F J L C 7 7 Y W m L f 5 k Y x f X 1 9 J d e I = " &gt; A A A B 8 n i c b V B N S w M x E M 3 W r 1 q / q h 6 9 B I v g q e y K V I 9 F p X g R K t g P 2 C 4 l m 6 Z t a D Z Z k l m x L P 0 Z X j w o 4 t V f 4 8 1 / Y 9 r u Q V s f D D z e m 2 F m X h g L b s B 1 v 5 3 c y u r a + k Z + s 7 C 1 v b O 7 V 9 w / a B q V a M o a V A m l 2 y E x T H D J G s B B s H a s G Y l C w V r h 6 H r q t x 6 Z N l z J B x j H L I j I Q P I + p w S s 5 H e A P U F a q 9 3 c T b r F k l t 2 Z 8 D L x M t I C W W o d 4 t f n Z 6 i S c Q k U E G M 8 T 0 3 h i A l G j g V b F L o J I b F h I 7 I g P m W S h I x E 6 S z k y f 4 x C o 9 3 F f a l g Q 8 U 3 9 P p C Q y Z h y F t j M i M D S L 3 l T 8 z / M T 6 F 8 G K Z d x A k z S + a J + I j A o P P 0 f 9 7 h m F M T Y E k I 1 t 7 d i O i S a U L A p F W w I 3 u L L y 6 R 5 V v Y q 5 c r 9 e a l 6 l c W R R 0 f o G J 0 i D 1 2 g K r p F d d R A F C n 0 j F 7 R m w P O i / P u f M x b c 0 4 2 c 4 j + w P n 8 A R 1 3 k S k = &lt; / l a t e x i t &gt; FFDM &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " D Q V Y A n G G Y h k n g O z o R l D M O D C A o Z 8 = " &gt; A A A B 8 n i c b V B N S w M x E M 3 W r 1 q / q h 6 9 B I v g q e y K V I / F I n g R K t o P 2 C 4 l m 6 Z t a D Z Z k l m x L P 0 Z X j w o 4 t V f 4 8 1 / Y 9 r u Q V s f D D z e m 2 F m X h g L b s B 1 v 5 3 c y u r a + k Z + s 7 C 1 v b O 7 V 9 w / a B q V a M o a V A m l 2 y E x T H D J G s B B s H a s G Y l C w V r h q D b 1 W 4 9 M G 6 7 k A 4 x j F k R k I H m f U w J W 8 j v A n i C t X d / f T r r F k l t 2 Z 8 D L x M t I C W W o d 4 t f n Z 6 i S c Q k U E G M 8 T 0 3 h i A l G j g V b F L o J I b F h I 
7 I g P m W S h I x E 6 S z k y f 4 x C o 9 3 F f a l g Q 8 U 3 9 P p C Q y Z h y F t j M i M D S L 3 l T 8 z / M T 6 F 8 G K Z d x A k z S + a J + I j A o P P 0 f 9 7 h m F M T Y E k I 1 t 7 d i O i S a U L A p F W w I 3 u L L y 6 R 5 V v Y q 5 c r d e a l 6 l c W R R 0 f o G J 0 i D 1 2 g K r p B d d R A F C n 0 j F 7 R</formula><formula xml:id="formula_11">G i K W v Q S E S 6 H R L D B F e s g R w F a 8 e a E R k K 1 g r H t Z n f e m T a 8 E j d 4 y R m g S R D x Q e c E r T S Q x f Z E 6 a 1 2 r R X L H l l b w 5 3 l f g Z K U G G e q / 4 1 e 1 H N J F M I R X E m I 7 v x R i k R C O n g k 0 L 3 c S w m N A x G b K O p Y p I Z o J 0 f v D U P b N K 3 x 1 E 2 p Z C d 6 7 + n k i J N G Y i Q 9 s p C Y 7 M s j c T / / M 6 C Q 6 u g 5 S r O E G m 6 G L R I B E u R u 7 s e 7 f P N a M o J p Y Q q r m 9 1 a U j o g l F m 1 H B h u A v v 7 x K m h d l v 1 K u 3 F 2 W q j d Z H H k 4 g V M 4 B x + u o A q 3 U I c G U J D</formula><formula xml:id="formula_12">m O E Q x U h 7 w X V 5 f 2 M L d E = " &gt; A A A B 8 X i c b V B N S 8 N A E N 3 U r 1 q / q h 6 9 B I v g q S Q i 1 W P R i w f F C v Y D 2 1 A 2 2 0 m 7 d L M J u x O x h P 4 L L x 4 U 8 e q / 8 e a / c d v m o K 0 P B h 7 v z T A z z 4 8 F 1 + g 4 3 1 Z u a X l l d S 2 / X t j Y 3 N r e K e 7 u N X S U K A Z 1 F o l I t X y q Q X A J d e Q o o B U r o K E v o O k P L y d + 8 x G U 5 p G 8 x 1 E M X k j 7 k g e c U T T S Q w f h C d O b 6 9 t x t 1 h y y s 4 U 9 i J x M 1 I i G W r d 4 l e n F 7 E k B I l M U K 3 b r h O j l 1 K F n A k Y F z q J h p i y I e 1 D 2 1 B J Q 9 B e O r 1 4 b B 8 Z p W c H k T I l 0 Z 6 q v y d S G m o 9 C n 3 T G V I c 6 H l v I v 7 n t R M M z r 2 U y z h B k G y 2 K E i E j Z E 9 e d / u c Q U M x c g Q y h Q 3 t 9 p s Q B V l a E I q m B D c + Z c X S e O k 7 F b K l b v T U v U i i y N P D s g h O S Y u O S N V c k V q p E 4 Y k e S Z v J I 3 S 1 s v 1 r v 1 M W v N W d n M P v k D 6 / M H p J G Q 6 g = = &lt; / l a t e x i t &gt; MLO &lt; l a t e x i t s h a 1 _ b a s e 6 4 = 
" Q Y M y j O H c T U j B 7 O a 9 S X 9 P f B 8 p S x U = " &gt; A A A B 8 3 i c b V B N S 8 N A E J 3 U r 1 q / q h 6 9 B I v g q S Q i 1 W P R i w f F C v Y D m l A 2 2 0 2 7 d L M J u x O x h P 4 N L x 4 U 8 e q f 8 e a / c d v m o N U H A 4 / 3 Z p i Z F y S C a 3 S c L 6 u w t L y y u l Z c L 2 1 s b m 3 v l H f 3 W j p O F W V N G o t Y d Q K i m e C S N Z G j Y J 1 E M R I F g r W D 0 e X U b z 8 w p X k s 7 3 G c M D 8 i A 8 l D T g k a y R v 0 P G S P m N 1 c 3 0 5 6 5 Y p T d W a w / x I 3 J x X I 0 e i V P 7 1 + T N O I S a S C a N 1 1 n Q T 9 j C j k V L B J y U s 1 S w g d k Q H r G i p J x L S f z W 6 e 2 E d G 6 d t h r E x J t G f q z 4 m M R F q P o 8 B 0 R g S H e t G b i v 9 5 3 R T D c z / j M k m R S T p f F K b C x t i e B m D 3 u W I U x d g Q Q h U 3 t 9 p 0 S B S h a G I q m R D c x Z f / k t Z J 1 a 1 V a 3 e n l f p F H k c R D u A Q j s G F M 6 j D F T S g C R Q S e I I X e L V S 6 9 l 6 s 9 7 n r Q U r n 9 m H X 7 A + v g E i 2 J H E &lt; / l a t e x i t &gt; gMLO &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " J / T n s o E 3 g T U 5 d f w / Y B X V x R h C N m o = " &gt; A A A B 8 n i c b V B N S 8 N A E N 3 U r 1 q / q h 6 9 L B b B U 0 l E q s d i L x 4 r 2 F p o Q 9 l s J + 3 S T T b s T s Q S + j O 8 e F D E q 7 / G m / / G b Z u D t j 4 Y e L w 3 w 8 y 8 I J H C o O t + O 4 W 1 9 Y 3 N r e J 2 a W d 3 b / + g f H j U N i r V H F p c S a U 7 A T M g R Q w t F C i h k 2 h g U S D h I R g 3 Z v 7 D I 2 g j V H y P k w T 8 i A 1 j E Q r O 0 E r d Y b + H 8 I R Z o z H t l y t u 1 Z 2 D r h I v J x W S o 9 k v f / U G i q c R x M g l M 6 b r u Q n 6 G d M o u I R p q Z c a S B g f s y F 0 L Y 1 Z B M b P 5 i d P 6 Z l V B j R U 2 l a M d K 7 + n s h Y Z M w k C m x n x H B k l r 2 Z + J / X T T G 8 9 j M R J y l C z B e L w l R S V H T 2 P x 0 I D R z l x B L G t b C 3 U j 5 i m n G 0 K Z V s C N 7 y y 6 u k f V H 1 a t X a 3 W W l f p P H U S Q n 5 J S c E</formula><formula xml:id="formula_13">W d m G n u L u 3 f 3 B Y O j p u 6 T h V D 
J s s F r H q B F S j 4 B K b h h u B n U Q h j Q K B 7 W B 8 N / P b T 6 g 0 j + W D y R L 0 I z q U P O S M G i s 1 s n 6 p 7 F b c O c g q 8 X J S h h z 1 f u m r N 4 h Z G q E 0 T F C t u 5 6 b G H 9 C l e F M 4 L T Y S z U m l I 3 p E L u W S h q h 9 i f z Q 6 f k 3 C o D E s b K l j R k r v 6 e m N B I 6 y w K b G d E z U g v e z P x P 6 + b m v D G n 3 C Z p A Y l W y w K U 0 F M T G Z f k w F X y I z I L K F M c X s r Y S O q K D M 2 m 6 I N w V t + e Z W 0 L i t e t V J t X J V r t 3 k c B T i F M 7 g A D 6 6 h B v d Q h y Y w Q H i G V 3 h z H p 0 X 5 9 3 5 W L S u O f n M C f y B 8 / k D 6 0 + N C A = = &lt; / l a t e x i t &gt; y &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " 1 E r 5 M I E t 3 C m H L y e B M R z L 3 V E x U a w = " &gt; A A A B 8 H i c b V D L T g J B E O z F F + I L 9 e h l I z H x R H a N Q Y 9 E L h 4 x k Y e B D Z k d B p g w M 7 u Z 6 T W S D V / h x Y P G e P V z v P k 3 D r A H B S v p p F L V n e 6 u M B b c o O d 9 O 7 m 1 9 Y 3 N r f x 2 Y W d 3 b / + g e H j U N F G i K W v Q S E S 6 H R L D B F e s g R w F a 8 e a E R k K 1 g r H t Z n f e m T a 8 E j d 4 y R m g S R D x Q e c E r T S Q x f Z E 6 a 1 2 r R X L H l l b w 5 3 l f g Z K U G G e q / 4 1 e 1 H N J F M I R X E m I 7 v x R i k R C O n g k 0 L 3 c S w m N A x G b K O p Y p I Z o J 0 f v D U P b N K 3 x 1 E 2 p Z C d 6 7 + n k i J N G Y i Q 9 s p C Y 7 M s j c T / / M 6 C Q 6 u g 5 S r O E G m 6 G L R I B E u R u 7 s e 7 f P N a M o J p Y Q q r m 9 1 a U j o g l F m 1 H B h u A v v 7 x K m h d l v 1 K u 3 F 2 W q j d Z H H k 4 g V M 4 B x + u o A q 3 U I c G U J D</formula><formula xml:id="formula_14">m O E Q x U h 7 w X V 5 f 2 M L d E = " &gt; A A A B 8 X i c b V B N S 8 N A E N 3 U r 1 q / q h 6 9 B I v g q S Q i 1 W P R i w f F C v Y D 2 1 A 2 2 0 m 7 d L M J u x O x h P 4 L L x 4 U 8 e q / 8 e a / c d v m o K 0 P B h 7 v z T A z z 4 8 F 1 + g 4 3 1 Z u a X l l d S 2 / X t j Y 3 N r e K e 7 u N X S U K A Z 1 F o l I t X y q Q X A J d e Q o o B U r o K E v o O k P 
L y d + 8 x G U 5 p G 8 x 1 E M X k j 7 k g e c U T T S Q w f h C d O b 6 9 t x t 1 h y y s 4 U 9 i J x M 1 I i G W r d 4 l e n F 7 E k B I l M U K 3 b r h O j l 1 K F n A k Y F z q J h p i y I e 1 D 2 1 B J Q 9 B e O r 1 4 b B 8 Z p W c H k T I l 0 Z 6 q v y d S G m o 9 C n 3 T G V I c 6 H l v I v 7 n t R M M z r 2 U y z h B k G y 2 K E i E j Z E 9 e d / u c Q U M x c g Q y h Q 3 t 9 p s Q B V l a E I q m B D c + Z c X S e O k 7 F b K l b v T U v U i i y N P D s g h O S Y u O S N V c k V q p E 4 Y k e S Z v J I 3 S 1 s v 1 r v 1 M W v N W d n M P v k D 6 / M H p J G Q 6 g = = &lt; / l a t e x i t &gt; MLO</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.4.">Homogenization</head><p>In the domain of lung CT imaging, the heterogeneity of images stemming from varied scanners and reconstruction kernels poses a significant challenge. This variability can severely impact the performance of automated analysis tools, notably in tasks relying on deep learning models such as 3D Convolutional Neural Networks (CNNs), which are crucial for predicting patient outcomes like overall survival rates. To address this challenge, our work introduces an innovative approach based on StarGAN, a state-of-the-art image-to-image translation generative model, for the homogenization of lung CT images.</p><p>Our objective is to transform disparate lung CT images, regardless of their originating scanner types or reconstruction kernels, into a standardized format that retains critical diagnostic features while presenting a uniform appearance. By employing StarGAN, we leverage its capacity for multi-domain image translation to achieve the goal of not only enhancing the quality of the dataset but also significantly improving the performance of downstream tasks. This approach paves the way for more generalized and robust AI tools in medical diagnostics, ultimately contributing to better patient care and outcomes.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">Virtual Scanner Applications</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.1.">Virtual Contrast Enhancement (VCE)</head><p>In the evolving landscape of medical imaging, Contrast Enhanced Spectral Mammography (CESM) represents a significant advancement, offering detailed insights for breast cancer diagnosis by utilizing a dual-energy technique that integrates both low and high-energy images. This method, however, necessitates the administration of an iodinated contrast medium and subjects patients to higher radiation doses than standard mammography, raising concerns about potential side effects and increased radiation exposure.</p><p>Addressing these critical limitations, our work introduces a novel approach to VCE in CESM using deep generative models <ref type="bibr" target="#b10">[11]</ref>. By eliminating the need for contrast mediums and aiming to reduce radiation doses, this research not only mitigates the associated risks but also preserves the diagnostic benefits of CESM. Our methodology employs GAN, e.g., Pix2Pix or CycleGAN, to generate synthetic recombined images from solely low-energy images.</p><p>An extensive quantitative and qualitative analysis underpins our research, including evaluations by professional radiologists on a novel CESM dataset comprising 1,138 images. This dataset has been made publicly available to foster ongoing research and development in the field. Among the models tested, CycleGAN emerged as the most effective, showcasing its ability to produce high-quality synthetic recombined images that closely mimic those obtained with traditional contrast-enhanced techniques.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.2.">Virtual Treatment Planning in Lung Cancer</head><p>Monitoring the progression and response to therapy is fundamental in lung cancer treatment. Traditional approaches rely on a series of CT scans taken before and during treatment to evaluate the efficacy of the interventions. In our previous work <ref type="bibr" target="#b11">[12]</ref>, we developed an ODE-based Digital Twin by using patient-specific CT scans to train a deep reinforcement learning controller, which can adapt to different tissue aggressiveness and outperform the current radiotherapy clinical practice of uniform dose delivery. However, this methodology often exposes patients to additional radiation and can be logistically challenging. Our innovative research introduces a novel application of AI in virtual treatment planning, leveraging conditioned CycleGANs to simulate the potential progression of lung cancer treatment based on varying doses. By conditioning the CycleGAN on specific treatment doses, our model can generate virtual CT scans that predict how the patient's anatomy and the tumor itself might respond to different levels of treatment. This approach allows for the creation of a virtual time series of CT scans without the need for repeated radiation exposure. The ability to accurately forecast the treatment's progression through these synthetic scans offers a significant advantage in personalizing treatment plans, enabling more precise adjustments to therapy regimens based on predicted outcomes. By reducing the reliance on multiple physical CT scans and minimizing patient exposure to radiation, we pave the way for a more patient-centric approach to cancer treatment monitoring. Additionally, the predictive insights gained from this technology could significantly enhance decision-making processes in treatment planning, potentially improving patient outcomes in lung cancer care.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.3.">Whole-Body Translation from CT to PET</head><p>The integration of CT and PET scans is essential in oncological diagnostics, combining the structural clarity of CT with the metabolic insights of PET imaging. While CT scans provide detailed anatomical structure, PET scans offer a window into the metabolic activity within the body, making the combined PET/CT an invaluable tool in the diagnosis, staging, and management of cancer patients. Despite their clinical significance, the dual-modality approach of PET/CT scanning is not without drawbacks, e.g., additional radiation exposure and higher costs compared to CT-only scans. These limitations restrict the widespread availability of PET/CT imaging in numerous medical centers globally, underscoring the need for alternative methods that can replicate the integrative insights of PET/CT imaging while mitigating its drawbacks.</p><p>Recognizing the challenges inherent in translating CT images to PET, especially given the variability in translation effectiveness across different anatomical regions, our methodology introduces a district-specific approach. Drawing from the current literature, which suggests the potential for improved accuracy through organ-specific networks, we propose a novel strategy that segments whole-body images into four major anatomical districts. Each district is then processed through independently trained GANs to generate district-specific PET images. The final step involves stitching these district-specific PET images together to reconstruct a comprehensive whole-body PET scan.</p><p>Employing two GAN architectures, Pix2Pix and CycleGAN, our approach facilitates a comparative analysis to evaluate the effectiveness and precision of the image translation process. 
Through standard evaluation metrics, we quantify the quality of the generated images, highlighting the advantages of our district-specific translation methodology over traditional approaches that rely on a single GAN trained on entire whole-body images. This innovative strategy not only promises to reduce the time, cost, and radiation exposure associated with PET/CT imaging but also offers a tailored approach that accounts for the unique characteristics of different anatomical regions.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.4.">Texture Loss</head><p>In the quest to enhance the quality of medical images through denoising, the application of GANs emerges as a promising task. Yet, a critical challenge lies in the GAN-based algorithms' capacity to accurately capture and replicate the intricate textural details inherent in medical images. This task's complexity is significantly amplified by the diverse and complex relationships that define image textures, making conventional denoising approaches inadequate for preserving or restoring fine-grained textural fidelity.</p><p>Our research introduces a novel loss function tailored to address these limitations by exploiting the multiscale textural properties captured by the Gray-Level Co-occurrence Matrix (GLCM) <ref type="bibr" target="#b12">[13]</ref>. The GLCM, traditionally utilized in image processing to quantify texture, is redefined in our work as a differentiable module compatible with the gradient-based optimization of the GAN training processes. By integrating a multi-scale, differentiable GLCM into the loss function, we facilitate a deeper understanding and recognition of complex textural information during the image generation phase.</p><p>Furthermore, the incorporation of a self-attention layer represents a pivotal innovation in our methodology, enabling the dynamic synthesis of texture information across various scales. This approach not only enhances the denoising capabilities of GANs but also ensures the preservation of essential textural details, thereby improving the diagnostic utility of the generated images.</p><p>Extensive experimental validation of our approach within the field of low-dose CT denoising, aimed at improving noisy CT scans while minimizing radiation exposure, underscores the efficacy of our proposed solution. 
Utilizing three publicly available datasets, including both simulated and real-world scenarios, our methodology demonstrates a notable improvement over traditional loss functions across a variety of GAN architectures.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.5.">Report Generation</head><p>Our work focuses on Automatic Medical Reporting (AMR) that, as fostered by escalating digitization of healthcare data and the mounting stress on national healthcare systems, aims to produce diagnostic reports from biomedical data. The efforts are currently directed towards chest radiographs assessing solutions based on encoder-decoder and transformer-based models. Alongside all this, Quantum Artificial Intelligence represents a novel field whose theoretical superiority in data representation capabilities and processing speeds makes it the main technology we are directing our efforts toward, with its numerous methodologies for healthcare, even if it still presents hardware immaturity, scalability issues, and substantial financial costs. Because its application in AMR is still unexplored, we aim to develop an architecture that merges traditional encoder-decoder concepts with quantum computing, to transcribe features obtained using classical binary computation into quantum states, which are then entangled with quantum representations of the shifted predictions for computational and accuracy benefits.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Future Directions and Conclusion</head><p>As we stand on the verge of a new era in medical imaging, propelled by the advancements in generative AI, the journey of our exploration is ongoing. The groundwork laid by the Virtual Scanner and the development of resilient generative AI algorithms opens a myriad of pathways for future research and application. In the quest for further innovation, it is essential to delve deeper into the integration of AI with emerging imaging technologies, aiming to enhance the precision, efficiency, and accessibility of diagnostic tools. Future research will focus on refining the algorithms for even greater resilience <ref type="bibr" target="#b13">[14]</ref>, enabling them to adapt more seamlessly to the vast diversity of medical imaging data. Additionally, exploring the potential for AI-driven predictive analytics in patient treatment plans presents a promising frontier, where the insights garnered from virtual scans could inform more personalized and effective treatment strategies. Moreover, the ethical considerations and data privacy concerns associated with deploying AI in healthcare require ongoing attention. Ensuring the security of patient data and the unbiased application of AI tools remains paramount as we advance.</p><p>In conclusion, the exploration into generative AI and the Virtual Scanner represents a significant leap toward revolutionizing medical imaging. 
As we move forward, the presented research activities and the technologies developed will undoubtedly pave the way for a future where diagnostics are more accurate, treatments are more personalized, and patient care is enhanced at every level.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head></head><label></label><figDesc>x p P x Y r w b H 7 P S g p H 3 7 I M / M D 5 / A C / 4 l q U = &lt; / l a t e x i t &gt; (G0) &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " s w 0 E E y z 7 c j T / 6 7 a 5 A m G 5 1 o 5 c h 3</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head></head><label></label><figDesc>r 6 R D s + Z P / k n a 9 Z p / W T m 7 r 5 c Z F H k c R 7 I M D U A U 2 O A M N c A O a o A U w e A B P 4 A W 8 G o / G s / F m v M 9 K C 0 b e s w t + w f j 4 B v 4 p l o U = &lt; / l a t e x i t &gt;</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head></head><label></label><figDesc>Virtual Contrast Enhancement Virtual Treatment Planning in Lung Cancer Whole-Body Translation from CT to PET Texture Loss &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " d 9 u s</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head></head><label></label><figDesc>m w P O i / P u f M x b c 0 4 2 c 4 j + w P n 8 A S 4 y k T Q = &lt; / l a t e x i t &gt; CESM &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " 1 E r 5 M I E t 3 C m H L y e B M R z L 3 V E x U a w = " &gt; A A A B 8 H i c b V D L T g J B E O z F F + I L 9 e h l I z H x R H a N Q Y 9 E L h 4 x k Y e B D Z k d B p g w M 7 u Z 6 T W S D V / h x Y P G e P V z v P k 3 D r A H B S v p p F L V n e 6 u M B b c o O d 9 O 7 m 1 9 Y 3 N r f x 2 Y W d 3 b / + g e H j U N F</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head></head><label></label><figDesc>w D K / w 5 m j n x X l 3 P h a t O S e b O Y Y / c D 5 / A O d l k H 4 = &lt; / l a t e x i t &gt; CC &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " s g 6 x N 5 b y m</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_9"><head></head><label></label><figDesc>4 9 c k T q 5 J U 3 S I p w o 8 k x e y Z u D z o v z 7 n w s W g t O P n N M / s D 5 / A F k 4 p F Y &lt; / l a t e x i t &gt; gCC &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " W c + S u w G u 1 g W P W B 5 J A P 5 o l p W F k a g = " &gt; A A A B 6 H i c b V B N S 8 N A E J 3 4 W e t X 1 a O X x S J 4 K o l I 9 V j 0 4 r E F + w F t K J v t p F 2 7 2 Y T d j R B K f 4 E X D 4 p 4 9 S d 5 8 9 + 4 b X P Q 1 g c D j / d m m J k X J I J r 4 7 r f z t r 6 x u b</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_10"><head></head><label></label><figDesc>w D K / w 5 m j n x X l 3 P h a t O S e b O Y Y / c D 5 / A O d l k H 4 = &lt; / l a t e x i t &gt; CC &lt; l a t e x i t s h a 1 _ b a s e 6 4 = " s g 6 x N 5 b y m</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_11"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Overview of the presented research activities.</figDesc></figure>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>Aurora Rofena and Lorenzo Marcoccia are Ph.D. students enrolled in the National Ph.D. in Artificial Intelligence, course on Health and life sciences, organized by Università Campus Bio-Medico di Roma. We acknowledge financial support from: i) PNRR MUR project PE0000013-FAIR; ii) PRIN 2022 MUR 20228MZFAA-AIDA (CUP C53D23003620008); iii) PRIN PNRR 2022 MUR P2022P3CXJ-PICTURE (CUP C53D23009280001); iv) FCS MISE (CUP B89J23000580005). This work was also partially supported by the following companies: Teleconsys S.p.A..</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Assessing the impact of data-driven limitations on tracing and forecasting the outbreak dynamics of covid-19</title>
		<author>
			<persName><forename type="first">G</forename><surname>Fiscon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Salvadore</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">R</forename><surname>Garbuglia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Paci</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computers in biology and medicine</title>
		<imprint>
			<biblScope unit="volume">135</biblScope>
			<biblScope unit="page">104657</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Multi-objective optimization determines when, which and how to fuse deep networks: An application to predict covid-19 outcomes</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computers in Biology and Medicine</title>
		<imprint>
			<biblScope unit="volume">154</biblScope>
			<biblScope unit="page">106625</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Albano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Faiella</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Fazzini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Santucci</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2212.14084</idno>
		<title level="m">Multimodal explainability via latent shift applied to covid-19 stratification</title>
				<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">A multi-expert system to detect covid-19 cases in x-ray images</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">C</forename><surname>D'amico</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS), IEEE</title>
				<imprint>
			<date type="published" when="2021">2021. 2021</date>
			<biblScope unit="page" from="395" to="400" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Pareto optimization of deep networks for covid-19 diagnosis from chest x-rays</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">C</forename><surname>D'amico</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Pattern Recognition</title>
		<imprint>
			<biblScope unit="volume">121</biblScope>
			<biblScope unit="page">108242</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Optimized fusion of cnns to diagnose pulmonary diseases on chest x-rays</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">International Conference on Image Analysis and Processing</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2022">2022</date>
			<biblScope unit="page" from="197" to="209" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Building an ai-enabled metaverse for intelligent healthcare: opportunities and challenges</title>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">M</forename><surname>Caruso</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rofena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Manni</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Aksu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Paolo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Iannello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Ital-IA 2023, Italia Intelligenza Artificiale Thematic Workshops, co-located with the 3rd CINI National Lab AIIS Conference on Artificial Intelligence (Ital IA 2023)</title>
				<meeting><address><addrLine>Pisa, Italy</addrLine></address></meeting>
		<imprint>
			<publisher>CEUR-WS</publisher>
			<date type="published" when="2023">May 29-30, 2023. 2023</date>
			<biblScope unit="page" from="134" to="139" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Making ai trustworthy in multimodal and healthcare scenarios</title>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Iannello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Ruffini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Ital-IA</title>
				<meeting>the Ital-IA</meeting>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<monogr>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">H</forename><surname>Vu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Löfstedt</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2307.11375</idno>
		<title level="m">Latentaugment: Data augmentation via guided manipulation of gan&apos;s latent space</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">A comparative study between paired and unpaired Image Quality Assessment in Low-Dose CT Denoising</title>
		<author>
			<persName><forename type="first">F</forename><surname>Di Feola</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IEEE 36th International Symposium on Computer-Based Medical Systems (CBMS), IEEE</title>
				<imprint>
			<date type="published" when="2023">2023. 2023</date>
			<biblScope unit="page" from="471" to="476" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<monogr>
		<author>
			<persName><forename type="first">A</forename><surname>Rofena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sarli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">L</forename><surname>Piccolo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sammarra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">B</forename><surname>Zobel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2308.00471</idno>
		<title level="m">A deep learning approach for virtual contrast enhancement in contrast enhanced spectral mammography</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Deep reinforcement learning for fractionated radiotherapy in non-small cell lung carcinoma</title>
		<author>
			<persName><forename type="first">M</forename><surname>Tortora</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Cordelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sicilia</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Miele</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Matteucci</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Iannello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Ramella</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence in Medicine</title>
		<imprint>
			<biblScope unit="volume">119</biblScope>
			<biblScope unit="page">102137</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<monogr>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">D</forename><surname>Feola</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tronchin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2403.16640</idno>
		<title level="m">Multi-Scale Texture Loss for CT denoising with GANs</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<monogr>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">M</forename><surname>Caruso</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Guarrasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Ramella</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Soda</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2307.11465</idno>
		<title level="m">A deep learning approach for overall survival analysis with missing values</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
