<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.0 20120330//EN" "JATS-archivearticle1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-title-group>
        <journal-title>International Journal of Ma</journal-title>
      </journal-title-group>
    </journal-meta>
    <article-meta>
      <contrib-group>
        <contrib contrib-type="author">
          <string-name>Maura Pintor</string-name>
          <email>maura.pintor@unica.it</email>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Giulia Orrú</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Davide Maiorca</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Ambra Demontis</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Luca Demetrio</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff1">1</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Gian Luca Marcialis</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Battista Biggio</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff5">5</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Fabio Roli</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff1">1</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <aff id="aff0">
          <label>0</label>
          <institution>5 assistant professors (Dr. Ambra Demontis</institution>
          ,
          <addr-line>Dr. Davide</addr-line>
        </aff>
        <aff id="aff1">
          <label>1</label>
          <institution>Department of Informatics</institution>
          ,
          <addr-line>Bioengineering, Robotics, and Systems Engineering</addr-line>
          ,
          <institution>University of Genova</institution>
        </aff>
        <aff id="aff2">
          <label>2</label>
          <institution>Giorgio Giacinto</institution>
          ,
          <addr-line>Prof. Battista Biggio, Prof. Luca Di-</addr-line>
        </aff>
        <aff id="aff3">
          <label>3</label>
          <institution>Machine Learning, Adversarial Machine Learning</institution>
          ,
          <addr-line>Biometrics, Cybersecurity</addr-line>
        </aff>
        <aff id="aff4">
          <label>4</label>
          <institution>Maiorca</institution>
          ,
          <addr-line>Dr. Giulia Orrú, Dr. Maura Pintor, Dr. Lorenzo</addr-line>
        </aff>
        <aff id="aff5">
          <label>5</label>
          <institution>Pattern Recognition and Applications Laboratory (PRALab), Department of Electrical and Electronic Engineering, University of Cagliari</institution>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff6">
          <label>6</label>
          <institution>daci</institution>
          ,
          <addr-line>Prof. Giorgio Fumera, Prof. Gian Luca Marcialis)</addr-line>
        </aff>
        <aff id="aff7">
          <label>7</label>
          <institution>models developed at the Pattern Recognition and Applications Laboratory (PRALab) of the University of Cagliari. Our findings</institution>
        </aff>
      </contrib-group>
      <pub-date>
        <year>2022</year>
      </pub-date>
      <volume>13358</volume>
      <fpage>29</fpage>
      <lpage>31</lpage>
      <abstract>
        <p>We present here the main research topics and activities on the design, security, safety, and robustness of machine learning models. Our research has significantly contributed to identifying and characterizing the vulnerability of such models to adversarial attacks in the context of real-world applications, and to proposing robust techniques to make these models more reliable in security-critical scenarios. The Pattern Recognition and Applications (PRA) Laboratory has made pioneering contributions in the area of AI/ML security, being the first to identify such vulnerabilities and to propose techniques to mitigate them, playing a leading role in the establishment of this research field. Its mission is to address fundamental issues for the development of future pattern recognition systems. ∗Corresponding author.</p>
      </abstract>
    </article-meta>
  </front>
  <body>
    <sec id="sec-1">
      <title>1. Research Group</title>
      <p>The Pattern Recognition and Applications (PRA) Laboratory
was founded in 1996. The PRALab has been active
for more than 20 years at the University of Cagliari. Its
mission is to address fundamental issues for the development
of future pattern recognition systems in the context
of real applications, focused on creating secure systems
for security applications, as reflected by our motto:
there is nothing more practical than a good
theory, by Kurt Lewin.</p>
      <p>Our activities can be categorized into four
highly interdependent lines: (i) development of theories to solve
problems of fundamental research, including multiple
classifier systems (our original expertise) and
adversarial machine learning; (ii) application of these theories
to solve practical problems in the research domains of
computer vision for video surveillance and ambient
intelligence, computer security, biometrics, document and
multimedia categorization, and cybersecurity; (iii) testing
and validation of the proposed solutions on real-world
data (in-vivo experiments); and (iv) development of
prototypes and demonstrators, through which the results of
basic research are translated into functional products.</p>
    </sec>
    <sec id="sec-2">
      <title>The PRALab team, led by the lab director (Prof. Fabio</title>
    </sec>
    <sec id="sec-3">
      <title>Roli), consists of 1 full and 4 associate professors (Prof.</title>
      <sec id="sec-3-1">
        <title>2. Research Topics</title>
        <p>
          We are among the first to have studied the impact of
adversarial machine learning on security applications such
as the analysis of malware [
          <xref ref-type="bibr" rid="ref1 ref4 ref5">1, 4, 5</xref>
          ] (Sect. 2.1). We apply
robust techniques to improve malware detection on
different settings (Sect. 2.2). Furthermore, we investigate
techniques to improve the robustness of learning-based
systems used for fingerprint, facial and behavioral
biometrics, and we organize every two years a challenge to
propose and fairly compare newly-developed techniques
for reliable fingerprint authentication ( Sect. 2.3).
        </p>
        <sec id="sec-3-1-1">
          <title>2.1. Machine Learning Security</title>
          <p>
            Recent progress in AI/ML technologies has reported
impressive performances in many tasks, paving the way for
their use in safety-critical applications like autonomous
driving and cybersecurity. Unfortunately, it has been
shown that AI/ML technologies are vulnerable to
wellcrafted attacks performed by skilled attackers. The key to
understanding the security properties of AI/ML
technologies is to model the threats, simulate the corresponding
attacks, and assess the security properties of the system
in these scenarios [
            <xref ref-type="bibr" rid="ref3">3</xref>
            ]. As shown in Fig. 1, attacks on
          </p>
        </sec>
      </sec>
    </sec>
    <sec id="sec-4">
      <title>AI/ML models can be staged both at testing and at training time.</title>
    </sec>
    <sec id="sec-5">
      <title>Evasion Attacks. In this scenario, attackers modify the</title>
      <p>Training data
Evasion</p>
      <p>Learning
Classification</p>
    </sec>
    <sec id="sec-6">
      <title>Other Attacks. We have also been investigating other</title>
      <p>attacks on AI/ML models, including backdoor
poisoning [19] and reprogramming [20].</p>
      <sec id="sec-6-1">
        <title>2.2. Machine Learning for Robust</title>
      </sec>
      <sec id="sec-6-2">
        <title>Malware Detection</title>
        <p>Malicious
Legitimate
Test data</p>
      </sec>
    </sec>
    <sec id="sec-7">
      <title>Machine learning technologies can indeed help in fight</title>
      <p>ing the wide-spreading of malicious software that infects
and harms devices of users. Hence, we focus on the
development of smarter detectors that better spot threats in
the wild. We analyse the impact of infection vectors,
non-binary files that carry out additional malicious code (e.g.,
PDF and ActionScript files) [ 21, 22, 23, 24] by leveraging
static analysis techniques to extract meaningful features
that can be useful for classification. Also, we focus on
detecting malicious content stored in Binary Programs on
X86-64 [25] and Android Applications [26, 27, 28, 29], by
investigating how specific API calls (e.g., system-based
and crypto-related) can be used as features to
discriminate between malicious and benign programs.</p>
      <p>Our research also focused on understanding the
robustness of such detectors against test-time evasion
attacks. We were among the first to test both white-box
and black-box attacks by constructing working samples
that reflected the attacker’s modifications. Our research
covers the attacking strategies and the manipulations,
addressing four major directions.
input samples of the system to have them misclassified.</p>
      <p>
        Our researchers were the first to show that some popular
classification algorithms like Support Vector Machines,
Neural Networks [
        <xref ref-type="bibr" rid="ref1">1</xref>
        ], and feature selection algorithms are
vulnerable to this attack [
        <xref ref-type="bibr" rid="ref6">6</xref>
        ]. In [7], our researchers have
shown how to improve the efficiency of these attacks
while obtaining comparable performance. Depending on
the considered scenario, the attacks have to be adapted to
be sufficiently realistic. For example, our researchers
developed evasion attacks to construct malware that evade
the target system while preserving all their
functionalities [
        <xref ref-type="bibr" rid="ref5">5, 8, 9, 10</xref>
        ]. Furthermore, attackers often do not
know all the details regarding the target systems. Our
researchers have shown that effective attacks can also be
developed in this challenging scenario [11, 9, 10, 12].
      </p>
      <p>
        Robustness of Windows Malware Detectors. We
study the manipulations of Windows programs [30, 9,
10, 31], and we develop attacks in both white-box and
black-box settings. Also, these strategies have been also
used to test the robustness of commercial products hosted
on VirusTotal,1 highlighting that many of them are
susceptible to adversarial perturbations as well.
      </p>
      <p>
        Unfortunately, these empirical evaluations are often
not correctly conducted [
        <xref ref-type="bibr" rid="ref3">3</xref>
        ], and this leads to overestimating
the robustness of the considered system: the
system seems robust only because the attack fails. Our
researchers have developed methodologies and debugging
tools that can be used to improve current approaches
for empirical security evaluations to make them more
reliable [13, 14].
      </p>
      <p>
        Robustness of Android Malware Detectors. We
investigated how Android applications can be modified to
inject adversarial content inside them, by adding, e.g.,
fake permissions and API calls. Our studies focus on
stateof-the-art detectors, and we apply our findings against
them [
        <xref ref-type="bibr" rid="ref5">5, 32</xref>
        ].
      </p>
      <p>
        Poisoning Attacks. AI/ML technologies are often
retrained during the operative phase to consider changes
in the data distribution. In this scenario, an attacker
can compromise the training data by injecting samples
specifically devised to compromise the learning process,
making the classifier unable to classify samples at test
time correctly. Our researchers have been the first to
show that the Support Vector Machines [
        <xref ref-type="bibr" rid="ref2">2</xref>
        ], as well as
neural networks [15], feature selection methods [16], and
clustering algorithms [17] can be compromised by this
attack. Furthermore, our researchers have shown that
it is possible to find approximate solutions with smaller
computational cost and a comparable effectiveness [15,
18].
      </p>
      <p>
        Robustness of PDF Malware Detectors. We studied
the PDF file format and the practical evasive
manipulations that allow keeping all the relevant information of
the original file [
        <xref ref-type="bibr" rid="ref4">4, 33, 23</xref>
        ], in both white-box and
blackbox settings.
      </p>
      <p>
        Mitigation of Adversarial Attacks. After having
investigated the weaknesses of machine learning models,
we devised novel techniques that are robust to these
kinds of perturbations. Specifically, we focus on
forcing the attackers to drastically increase their effort to
bypass detection [
        <xref ref-type="bibr" rid="ref5">5</xref>
        ]. When unconstrained, trained linear
models tend to rely only upon a few discriminating
features to make their predictions. This enables the
attacker to easily bypass detection by perturbing only such
few, highly-relevant features. To mitigate this issue and
improve robustness, we introduced a theoretically-sound
regularization term that provides the optimal, robust
linear model against such attacks. This classifier practically
works by forcing the optimizer to redistribute the
importance of many input features, bounding the maximum
absolute weight value assigned to each of them. This in
turn constrains the attacker to manipulate more features
to bypass detection, effectively improving robustness.
1https://virustotal.com
      </p>
      <p>
        Among the numerous challenges that our researchers
face, we want to highlight the explainable AI for
biometrics topic, which is an aspect of fundamental importance
for designing reliable and understandable recognition
and classification systems, especially from the point of
view of forensic analysis.
      </p>
      <p>
        3. Projects
      </p>
    </sec>
    <sec id="sec-8">
      <title>Our research activities are carried out in the framework</title>
      <p>
        of regional, national, and European projects funded by
public and private initiatives. We had more than
twenty-five projects funded between 2012 and 2020. The full list
is available at http://pralab.diee.unica.it/en/Projects. Six
of them were funded by the European Commission, and
two of them were coordinated by the PRALab. Overall,
we received 3 million euros of funding, of which the
European Commission provided half. The annual turnover is
around four hundred thousand euros. We have different
ongoing projects on AI security:
      </p>
      <p>
        Explainable Malware Detection. The focus of this
research direction is twofold. First, we include techniques
that can be easily explained and verified inside the
development of malware detectors, allowing us to understand
why a model flags a program as malicious [
        <xref ref-type="bibr" rid="ref5">34, 5</xref>
        ]. Second,
we dissect the reason why adversarial attacks succeed
against the model under test [30], by leveraging
explainability techniques proposed in the state of the art.
      </p>
      <sec id="sec-8-1">
        <title>2.3. Biometrics</title>
        <p>One of our main activities in the field of biometrics, is
the design of methods and models for detecting attacks
that compromise the authenticity of personal identity,
particularly fingerprint and facial identity. Indeed,
authentication systems are vulnerable to the submission
of artificial replicas of the biometric trait on the sensor,
known as presentation attacks (PAs). Our researchers, in
addition to evaluating the dangers of new
manufacturing techniques for PAs from latent fingerprints [ 35] or
through adversarial techniques [36], are involved in the
development of appropriate detectors, and their
integration into current fingerprint authentication systems [ 37].
Since 2009, we are organizers of the International
Fingerprint Liveness Detection Competition (LivDet)2. LivDet
is a biennial appointment for companies and research
institutions with the aim of assessing the performance
of state-of-the-art fingerprint PA detection systems.</p>
        <p>Moreover, we study and implement new techniques
for deepfake detection, i.e. methods to detect
manipulations of facial identity obtained through deep learning
techniques in video content [38].</p>
        <p>In addition to the study of recognition systems through
strong biometrics, our researchers are specialized in
behavioural biometrics. In particular, through the study of
natural movements of people such as the speed of
creation or disintegration of groups of individuals [39], it is
possible to detect the emergence of anomalies, such as
episodes of violence or panic.
2http://livdet.diee.unica.it
1. 2023-2026 - Sec4AI4Sec aims to devise the testing
and protection of AI-enabled components in
software security assets. The project will start in late
2023.
2. 2022-2026 - “ELSA: European Lighthouse on
Secure and Safe AI,” funded by the EU Horizon
Europe research and innovation programme (grant
no. 101070617).
3. 2020-2023 - FFG COMET Module S3AI:
“Security and Safety for Shared Artificial Intelligence,”
funded by BMK, BMDW, and the Province of
Upper Austria in the frame of the COMET
Programme managed by the Austrian Research
Promotion Agency FFG. This project aims to provide
the foundations required to build secure and safe
shared artificial intelligence systems.
4. 2019-2023 - PRIN 2017 BullyBuster, funded by
the Italian Ministry of Education, University and
Research (CUP: F74I19000370001). The project
aims to provide AI-based solutions against the
phenomenon of bullying and cyberbullying.
5. 2020-2022 - PRIN 2017 RexLearn: “Reliable and
Explainable Machine Learning,” funded by the
Italian Ministry of Education, University and
Research (grant no.2017TWNMH2). This project
aims to develop novel learning paradigms, able
to take reliable and explainable decisions, and to
assess and mitigate the security risks associated
with potential misuses of machine learning.</p>
      </sec>
    </sec>
    <sec id="sec-9">
      <title>Some other relevant projects are listed in the following: • 2017-2019 - Research and Innovation Action LETS-CROWD: “Law Enforcement agencies human factor methods and Toolkit for the Secu</title>
      <sec id="sec-9-1">
        <title>4. Developed Tools</title>
        <p>Machine Learning Security and Robust Malware
Detection. As explained in the previous section,
correctly evaluating the robustness of AI/ML technologies
might be challenging. Our researchers have developed
different tools that help to perform security evaluations3.
These tools include SecML [40], a Python library to assess
the security evaluation of AI/ML technologies against
evasion and poisoning attacks, and an extension of this
library, called SecML Malware [31] ad-hoc for Windows
malware. For each of them, they have released a tool
that evaluates security through a graphical interface:
PandaVision, and ToucanStrike. Furthermore, our
researchers have released a tool to evaluate if an attack is
effective in the considered scenario4.</p>
      </sec>
    </sec>
    <sec id="sec-10">
      <title>Biometrics. In all research fields and in the technology</title>
      <p>transfer projects they lead, the researchers of the
biometrics unit provided proofs-of-concept and tools. For
example, within two projects funded by the Italian
Presidency of Minister, Scientific Division, our researchers
have developed the Fingerprint Forensic tool and the
Deepfake detection tool. The first is a tool of advanced
fingerprint image processing techniques, including
detecting fingerprint PA coming from latent marks. The
second is a tool that combines different state-of-the-art
deepfake detection algorithms to exploit complementary
information in assessing the authenticity of multimedia
content.</p>
      <sec id="sec-10-1">
        <title>5. Challenges and Perspectives</title>
        <p>rity and protection of CROWDs in mass
gatherings”. Call: H2020 - SEC-07-FCT-2016-2017.</p>
        <p>Grant Agreement H2020/N.740466.
• 2015-2018 – Innovation Action DOGANA: “aDvanced
sOcial enGineering And vulNerability Assessment
Framework”. Call: H2020 – DS 2014-1.
Grant Agreement H2020/N.653618.
• 2014-2016 – CSA CyberROAD: “Development
of the Cybercrime and Cyberterrorism Research
Roadmap”. Call: FP7 – SEC 2013.2.5-1. Grant
Agreement FP7-SEC-2013/N.607642.
• 2014-2016 - ILLBuster, “Buster of ILLegal
Contents spread by malicious computer networks”.
DGHOME - ISEC, Prevention of
and Fight Against Crime. Grant Agreement:
HOME/2012/ISEC/AG/4000004360.
• 2013-2015 - MAVEN: “Management and Authenticity
Verification of Multimedia Contents”. Call:
FP7-SME-2013-1. Grant Agreement FP7-SME-2013-1/N.606058.</p>
        <p>While research is quickly progressing in AI/ML Security,
companies are working on automating the development
and operations of ML models (MLOps) without focusing
too much on ML security-related issues. In this respect, a
relevant challenge for the future will be to extend the
current MLOps paradigm and also to encompass ML security
(towards implementing what we refer to as MLSecOps).
To this end, we plan to incorporate research on security
testing, protection, and monitoring of AI/ML models into
the MLOps development cycle. In particular, we plan to
extend our research towards: (i) developing and
improving attacks (including evasion, poisoning, and privacy
threats) for making security testing and validation of
AI/ML models more efficient and available for a wider set
of application domains; (ii) designing improved defenses
with robustness guarantees to protect AI/ML models not
only against such attacks but also to enable reliable
classification when out-of-distribution data is provided as
input; and (iii) designing methods that constantly
monitor if a deployed model is under attack during operation,
enabling prompt reaction when needed. We firmly
believe that integrating these dimensions into an MLSecOps
cycle will definitely help software engineers and
developers to seamlessly deploy and maintain more secure,
reliable, and trustworthy AI/ML models in practice.</p>
      </sec>
    </sec>
    <sec id="sec-11">
      <title>3https://github.com/pralab</title>
      <p>4https://github.com/pralab/IndicatorsOfAttackFailure
versarial feature selection against evasion attacks, 3140451. doi:1 0 . 1 1 4 5 / 3 1 2 8 5 7 2 . 3 1 4 0 4 5 1 .</p>
      <p>IEEE Trans. on Cybernetics 46 (2016) 766–777. [16] H. Xiao, B. Biggio, G. Brown, G. Fumera, C. Eckert,
[7] M. Pintor, F. Roli, W. Brendel, B. Biggio, Fast F. Roli, Is feature selection secure against training
Minimum-norm Adversarial Attacks through Adap- data poisoning?, in: ICML, 2015, pp. 1689–1698.
tive Norm Constraints, in: A. Beygelzimer, [17] B. Biggio, I. Pillai, S. R. Bulò, D. Ariu, M. Pelillo,
Y. Dauphin, P. Liang, J. W. Vaughan (Eds.), Ad- F. Roli, Is data clustering in adversarial settings
vances in Neural Information Processing Systems, secure?, in: Proc. of the 2013 AISec, AISec ’13, New
2021. York, NY, USA, 2013, pp. 87–98.
[8] B. Kolosnjaji, A. Demontis, B. Biggio, D. Maiorca, [18] A. E. Cinà, S. Vascon, A. Demontis, B. Biggio,
G. Giacinto, C. Eckert, F. Roli, Adversarial Mal- F. Roli, M. Pelillo, The Hammer and the Nut: Is
ware Binaries: Evading Deep Learning for Mal- Bilevel Optimization Really Needed to Poison
Linware Detection in Executables, ArXiv (2018). ear Classifiers?, in: IJCNN 2021, Shenzhen, China,
a r X i v : 1 8 0 3 . 0 4 1 7 3 . July 18-22, 2021, IEEE, 2021, pp. 1–8. URL: https:
[9] L. Demetrio, B. Biggio, G. Lagorio, F. Roli, A. Ar- //doi.org/10.1109/IJCNN52387.2021.9533557. doi:1 0 .
mando, Functionality-preserving black-box opti- 1 1 0 9 / I J C N N 5 2 3 8 7 . 2 0 2 1 . 9 5 3 3 5 5 7 .
mization of adversarial windows malware, IEEE [19] A. E. Cinà, K. Grosse, S. Vascon, A. Demontis, B.
BigTransactions on Information Forensics and Security gio, F. Roli, M. Pelillo, Backdoor learning curves:
16 (2021) 3469–3478. Explaining backdoor poisoning beyond influence
[10] L. Demetrio, S. E. Coull, B. Biggio, G. Lagorio, A. Ar- functions, 2021. a r X i v : 2 1 0 6 . 0 7 2 1 4 .
mando, F. Roli, Adversarial EXEmples: A survey [20] Y. Zheng, X. Feng, Z. Xia, X. Jiang, A. Demontis,
and experimental evaluation of practical attacks on M. Pintor, B. Biggio, F. Roli, Why adversarial
repromachine learning for windows malware detection, gramming works, when it fails, and how to tell the
ACM Trans. Priv. Secur. 24 (2021). diference, Information Sciences (2023).
[11] A. Demontis, M. Melis, M. Pintor, M. Jagielski, [21] D. Maiorca, G. Giacinto, I. Corona, A pattern
recogB. Biggio, A. Oprea, C. Nita-Rotaru, F. Roli, Why nition system for malicious pdf files detection, in:
do adversarial attacks transfer? Explaining trans- P. Perner (Ed.), Machine Learning and Data
Minferability of evasion and poisoning attacks, in: ing in Pattern Recognition, volume 7376 of Lecture
USENIX Security, USENIX Association, 2019. Notes in Computer Science, Springer Berlin
Heidel[12] M. Pintor, D. Angioni, A. Sotgiu, L. Demetrio, A. De- berg, 2012, pp. 510–524.</p>
      <p>montis, B. Biggio, F. Roli, Imagenet-patch: A dataset [22] D. Maiorca, D. Ariu, I. Corona, G. Giacinto, A
for benchmarking machine learning robustness structural and content-based approach for a
preagainst adversarial patches, Pattern Recognition cise and robust detection of malicious PDF files, in:
134 (2023) 109064. O. Camp, E. R. Weippl, C. Bidan, E. Aïmeur (Eds.),
[13] M. Pintor, L. Demetrio, A. Sotgiu, A. Demontis, ICISSP 2015 - Proceedings of the 1st International
N. Carlini, B. Biggio, F. Roli, Indicators of Attack Conference on Information Systems Security and
Failure: Debugging and Improving Optimization Privacy, ESEO, Angers, Loire Valley, France, 9-11
of Adversarial Examples, in: A. H. Oh, A. Agar- February, 2015, SciTePress, 2015, pp. 27–36. URL:
wal, D. Belgrave, K. Cho (Eds.), Advances in Neu- https://doi.org/10.5220/0005264400270036. doi:1 0 .
ral Information Processing Systems, 2022. URL: 5 2 2 0 / 0 0 0 5 2 6 4 4 0 0 2 7 0 0 3 6 .</p>
      <p>https://openreview.net/forum?id=Y1sWzKW0k4L. [23] D. Maiorca, B. Biggio, G. Giacinto, Towards
[14] M. Pintor, L. Demetrio, G. Manca, B. Biggio, F. Roli, adversarial malware detection: Lessons learned
Slope: A First-order Approach for Measuring Gra- from pdf-based attacks, ACM Comput. Surv. 52
dient Obfuscation, in: Proc. of the ESANN, ESANN (2019) 78:1–78:36. URL: http://doi.acm.org/10.1145/
2021, 2021. 3332184. doi:1 0 . 1 1 4 5 / 3 3 3 2 1 8 4 .
[15] L. Muñoz-González, B. Biggio, A. Demontis, A. Pau- [24] D. Maiorca, A. Demontis, B. Biggio, F. Roli,
dice, V. Wongrassamee, E. C. Lupu, F. Roli, To- G. Giacinto, Adversarial Detection of
wards poisoning of deep learning algorithms Flash Malware: Limitations and Open
Iswith back-gradient optimization, in: Proc. of sues, Computers &amp; Security 96 (2020).
the 10th ACM Works. AISec@CCS 2017, 2017, URL: https://www.sciencedirect.com/science/
pp. 27–38. URL: https://doi.org/10.1145/3128572. article/pii/S0167404820301760?dgcid=rss_sd_all.
doi:h t t p s : / / d o i . o r g / 1 0 . 1 0 1 6 / j . c o s e . 2 0 2 0 . 1 0 1 9 0 1 .</p>
    </sec>
  </body>
  <back>
    <ref-list>
      <ref id="ref1">
        <mixed-citation>
          [1]
          <string-name>
            <given-names>B.</given-names>
            <surname>Biggio</surname>
          </string-name>
          ,
          <string-name>
            <given-names>I.</given-names>
            <surname>Corona</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Maiorca</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Nelson</surname>
          </string-name>
          ,
          <string-name>
            <given-names>N.</given-names>
            <surname>Šrndić</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Laskov</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Giacinto</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Roli</surname>
          </string-name>
          ,
          <article-title>Evasion attacks against machine learning at test time</article-title>
          , in: ECML PKDD,
          <string-name>
            <surname>Part</surname>
            <given-names>III</given-names>
          </string-name>
          , volume
          <volume>8190</volume>
          <source>of LNCS</source>
          ,
          <year>2013</year>
          , pp.
          <fpage>387</fpage>
          -
          <lpage>402</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref2">
        <mixed-citation>
          [2]
          <string-name>
            <given-names>B.</given-names>
            <surname>Biggio</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Nelson</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Laskov</surname>
          </string-name>
          ,
          <article-title>Poisoning attacks against support vector machines</article-title>
          ,
          in:
          <source>29th ICML</source>
          ,
          <year>2012</year>
          , pp.
          <fpage>1807</fpage>
          -
          <lpage>1814</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref3">
        <mixed-citation>
          [3]
          <string-name>
            <given-names>B.</given-names>
            <surname>Biggio</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Roli</surname>
          </string-name>
          ,
          <article-title>Wild patterns: Ten years after the rise of adversarial machine learning</article-title>
          ,
          <source>Patt. Rec.</source>
          <volume>84</volume>
          (
          <year>2018</year>
          )
          <fpage>317</fpage>
          -
          <lpage>331</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref4">
        <mixed-citation>
          [4]
          <string-name>
            <given-names>D.</given-names>
            <surname>Maiorca</surname>
          </string-name>
          ,
          <string-name>
            <given-names>I.</given-names>
            <surname>Corona</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Giacinto</surname>
          </string-name>
          ,
          <article-title>Looking at the bag is not enough to find the bomb: an evasion of structural methods for malicious pdf files detection</article-title>
          ,
          in:
          <source>Proceedings of the 8th ACM SIGSAC symposium on Information, computer and communications security</source>
          ,
          <source>ASIA CCS '13</source>
          ,
          <publisher-name>ACM</publisher-name>
          ,
          <publisher-loc>New York, NY, USA</publisher-loc>
          ,
          <year>2013</year>
          , pp.
          <fpage>119</fpage>
          -
          <lpage>130</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref5">
        <mixed-citation>
          [5]
          <string-name>
            <given-names>A.</given-names>
            <surname>Demontis</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Melis</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Biggio</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Maiorca</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Arp</surname>
          </string-name>
          ,
          <string-name>
            <given-names>K.</given-names>
            <surname>Rieck</surname>
          </string-name>
          ,
          <string-name>
            <given-names>I.</given-names>
            <surname>Corona</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Giacinto</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Roli</surname>
          </string-name>
          ,
          <article-title>Yes, machine learning can be more secure! a case study on android malware detection</article-title>
          ,
          <source>IEEE Trans. Dependable and Secure Computing</source>
          (
          <year>2019</year>
          ).
        </mixed-citation>
      </ref>
      <ref id="ref6">
        <mixed-citation>
          [6]
          <string-name>
            <given-names>F.</given-names>
            <surname>Zhang</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Chan</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Biggio</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Yeung</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Roli</surname>
          </string-name>
          , Ad-
        </mixed-citation>
      </ref>
    </ref-list>
  </back>
</article>