<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.0 20120330//EN" "JATS-archivearticle1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta />
    <article-meta>
      <title-group>
        <article-title>Policy Advice and Best Practices on Bias and Fairness in Artificial Intelligence⋆</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author">
          <string-name>An Extended Abstract</string-name>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Jose M. Alvarez</string-name>
          <email>jose.alvarez@sns.it</email>
          <xref ref-type="aff" rid="aff12">12</xref>
          <xref ref-type="aff" rid="aff8">8</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Alejandra Bringas-Colmenarejo</string-name>
          <xref ref-type="aff" rid="aff13">13</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Alaa Elobaid</string-name>
          <xref ref-type="aff" rid="aff1">1</xref>
          <xref ref-type="aff" rid="aff2">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Simone Fabbrizzi</string-name>
          <xref ref-type="aff" rid="aff1">1</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Miriam Fahimi</string-name>
          <xref ref-type="aff" rid="aff11">11</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Antonio Ferrara</string-name>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff4">4</xref>
          <xref ref-type="aff" rid="aff9">9</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Siamak Ghodsi</string-name>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Carlos Mougan</string-name>
          <xref ref-type="aff" rid="aff13">13</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Ioanna Papageorgiou</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Paula Reyero</string-name>
          <xref ref-type="aff" rid="aff10">10</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Mayra Russo</string-name>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Kristen M. Scott</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Laura State</string-name>
          <xref ref-type="aff" rid="aff12">12</xref>
          <xref ref-type="aff" rid="aff8">8</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Xuan Zhao</string-name>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Salvatore Ruggieri</string-name>
          <email>salvatore.ruggieri@unipi.it</email>
          <xref ref-type="aff" rid="aff12">12</xref>
        </contrib>
        <aff id="aff0">
          <label>0</label>
          <institution>CENTAI</institution>
          ,
          <addr-line>Turin</addr-line>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff1">
          <label>1</label>
          <institution>CERTH</institution>
          ,
          <addr-line>Thessaloniki</addr-line>
          ,
          <country country="GR">Greece</country>
        </aff>
        <aff id="aff2">
          <label>2</label>
          <institution>Free University of Berlin</institution>
          ,
          <addr-line>Berlin</addr-line>
          ,
          <country country="DE">Germany</country>
        </aff>
        <aff id="aff3">
          <label>3</label>
          <institution>Free University of Bozen-Bolzano</institution>
          ,
          <addr-line>Bolzano</addr-line>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff4">
          <label>4</label>
          <institution>GESIS - Leibniz Institute</institution>
          ,
          <addr-line>Mannheim</addr-line>
          ,
          <country country="DE">Germany</country>
        </aff>
        <aff id="aff5">
          <label>5</label>
          <institution>KU Leuven</institution>
          ,
          <addr-line>Leuven</addr-line>
          ,
          <country country="BE">Belgium</country>
        </aff>
        <aff id="aff6">
          <label>6</label>
          <institution>Leibniz University Hannover</institution>
          ,
          <addr-line>Hannover</addr-line>
          ,
          <country country="DE">Germany</country>
        </aff>
        <aff id="aff7">
          <label>7</label>
          <institution>SCHUFA Holding AG</institution>
          ,
          <addr-line>Wiesbaden</addr-line>
          ,
          <country country="DE">Germany</country>
        </aff>
        <aff id="aff8">
          <label>8</label>
          <institution>Scuola Normale Superiore</institution>
          ,
          <addr-line>Pisa</addr-line>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff9">
          <label>9</label>
          <institution>TU Graz</institution>
          ,
          <addr-line>Graz</addr-line>
          ,
          <country country="AT">Austria</country>
        </aff>
        <aff id="aff10">
          <label>10</label>
          <institution>The Open University</institution>
          ,
          <addr-line>Milton Keynes</addr-line>
          ,
<country country="GB">UK</country>
        </aff>
        <aff id="aff11">
          <label>11</label>
          <institution>University of Klagenfurt</institution>
          ,
          <addr-line>Klagenfurt</addr-line>
          ,
          <country country="AT">Austria</country>
        </aff>
        <aff id="aff12">
          <label>12</label>
          <institution>University of Pisa</institution>
          ,
          <addr-line>Pisa</addr-line>
          ,
          <country country="IT">Italy</country>
        </aff>
        <aff id="aff13">
          <label>13</label>
          <institution>University of Southampton</institution>
          ,
          <addr-line>Southampton</addr-line>
          ,
<country country="GB">UK</country>
        </aff>
      </contrib-group>
      <abstract>
        <p>Introduction, motivation, and contributions. With the increasing usage of AI models in our daily lives, concerns have been raised on the negative impact of AI models on individuals and society due to their embedded biases [2]. There is a deep academic and social discussion around the alleged neutrality of these algorithmic systems as more examples confirm that such algorithmic systems are “value-laden in that they create moral consequences, reinforce or undercut ethical principles, or enable or diminish stakeholder rights and dignity” [3]. Included in that discussion is the interdisciplinary and growing field of fair-AI. In Álvarez et al. [1], we survey the fair-AI state-of-the-art of methods and resources as well as the latest policies on bias in AI, in turn, providing the much needed bird's-eye view for all stakeholders. Further, by</p>
      </abstract>
    </article-meta>
  </front>
  <body>
    <sec id="sec-1">
      <title>-</title>
      <p>leveraging from the results of the NoBIAS research project, we contribute to the ongoing policy
advice and best practices discussion, focusing on the European context.</p>
      <p>
        Fair-AI aims at designing methods for detecting, mitigating, and controlling biases in
AI-supported decision-making [
        <xref ref-type="bibr" rid="ref4 ref5">4, 5</xref>
        ]. Given its focus on bias and fairness, fair-AI has coalesced
multiple fields concerned with, among other research lines, the fairness of decision-making (e.g.,
[
        <xref ref-type="bibr" rid="ref6 ref7 ref8">6, 7, 8</xref>
        ]); bias as a cognitive, technical, and socio-technical phenomenon (e.g., [
<xref ref-type="bibr" rid="ref9 ref10 ref11 ref12">9, 10, 11, 12</xref>
        ]);
and designing ML systems for social good (e.g., [13, 14, 15]). The state-of-the-art has been
developing mainly on the technical side, sometimes reducing fair-AI problems to a numeric
optimization problem under some fairness metric [16, 17, 18]. This hegemonic view on fair-AI
problems has been increasingly criticized within the field itself (e.g., [19, 20, 21]), which, in
turn, has expanded the state-of-the-art. Additionally, it is important to include as part of the
state-of-the-art the regulatory frameworks being developed, in particular within the European
Union (EU) – such as the GDPR [22] and the AI Act [23] – to enforce fair-AI goals.
      </p>
      <p>
        It is challenging, especially for the novel researcher and practitioner interested in fair-AI
within the EU, to have a comprehensive view of the state-of-the-art. Therefore, the objectives
and, in turn, contributions of Álvarez et al. [
        <xref ref-type="bibr" rid="ref1">1</xref>
        ] are twofold:
• First, we provide an up-to-date entry-point to the state-of-the-art of the multidisciplinary
research on bias and fairness in AI. We take a bird’s-eye view of the methods and resources,
with links to specialized surveys, and of the issues and challenges related to policies on
bias and fairness in AI. Such an overview provides guidance for both new researchers
and AI practitioners.
• Second, we contribute toward the objective of providing policy advice and best practices for
dealing with bias and fairness in AI by leveraging from the results of the NoBIAS research
project. We present and discuss topics that emerged during the execution of the research
project, whose focus was on legal challenges in the context of the EU legislation, and on
understanding, mitigating, and accounting for bias from a multidisciplinary perspective.
The NoBIAS project. The NoBIAS project (January 2020 - June 2024) was a Marie
Skłodowska-Curie Innovative Training Network funded by the European Union’s Horizon 2020 research
and innovation program. The core objective of NoBIAS was to research and develop novel
interdisciplinary methods for AI-based decision-making without bias.¹
      </p>
      <p>Figure 1 shows the project’s architecture. The Bias Management Layer is made up of the
various components contributed by the research projects of the 15 NoBIAS Early-Stage Researchers
(ESRs). Together, these components aim to achieve three research objectives: understanding
bias, mitigating bias, and accounting for bias in data and AI-systems. An orthogonal Legal
Layer provides the necessary EU legal grounds. The purpose is not to produce one single bias
management framework but rather to combine technologies and techniques for generating
bias-aware AI-systems in different application domains and contexts.</p>
      <p>Paper structure. Following the objectives, the paper is divided into two main sections.</p>
      <p>
        In the Landscape of policies on bias and fairness in AI section, we provide a concise
overview of the state-of-the-art for fair-AI methods and policy topics. In this section, we point
¹For more information, visit: https://nobias-project.eu/
to the main contributions and resources in the area, providing guidance for both researchers
and practitioners. First, we cover Fair-AI methods and resources, in which we explore
the fairness metrics (group-level, individual-level, and causality-based) [24, 25, 26, 27, 28, 29],
tracing back their origins to fields like Philosophy and Economics [
        <xref ref-type="bibr" rid="ref6">30, 6, 31, 32</xref>
        ]. We also
discuss common applications (e.g., computer vision [33] and recommender systems [34]) and
popular standardization initiatives (e.g., the IEEE P7003™ Standard²). Second, we cover Policies
on bias and fairness in AI, in which we discuss policy and guidelines inventories (e.g., the
OECD.AI Policy Observatory³); the option not to use AI (e.g., the Stop LAPD Spying Coalition⁴);
documentation practices for bias (e.g., [35, 36, 37, 38]); and EU legal regimes and discussions
around them (e.g., [23, 22, 39, 40, 41]); among other topics.
      </p>
      <p>In the Lessons from the NoBIAS project section, we discuss policy advice and best practices
resulting from the execution of the NoBIAS research project. Here, we take a critical view
on the literature, focusing on findings from the NoBIAS project (e.g., [42, 43, 44, 45, 33, 46,
47, 48, 49]). We argue that the issues discussed are relevant, but not sufficiently developed or
acknowledged in the fair-AI literature. Thus, this section further enriches the state-of-the-art.
This section is organized according to the NoBIAS architecture in Figure 1. We cover the
NoBIAS Bias Management Layer through the subsections Understanding bias, Mitigating
bias, and Accounting for bias as well as the NoBIAS Legal Layer through the subsection Legal
challenges of bias in AI. Each subsection is further divided into relevant topics/themes. For
²https://standards.ieee.org/project/7003.html
³https://oecd.ai/en/dashboards/overview
⁴https://stoplapdspying.org/wp-content/uploads/2018/05/Before-the-Bullet-Hits-the-Body-May-8-2018.pdf
• AI models often lack the auxiliary causal knowledge required to prove anti-discrimination
cases as these require to show that the decision is because of the protected attribute.
• AI models’ complexity and opaqueness make it difficult to identify individuals and groups
that are treated unfairly.
• The design of AI models requires to agree on and to operationalise legal and ethical principles.
• Transparency and accountability of AI systems are a way to overcome the hegemonic theory
of fairness, which reduces fairness problems to quantitative metric optimization.
• There are synergies and frictions in the EU legal framework between data protection law and
non-discrimination law, which demand for an integrated and interdisciplinary techno-legal
framework of bias management.
• We should acknowledge that there are many forms of bias, with different roots and effects.
• The “ground-truth” is a myth. It does not exist in a structurally unjust and unequal society.
• Data curation in AI should import source criticism and archival practices from historical and
humanistic disciplines.
• There is an hyper-fixation on data as the primary source of bias, but the whole AI pipeline
needs to be addressed, including the data annotation process and data labourers’ exploitation.
• Different data types require specific regulatory guidelines and standards.
instance, in Understanding bias we discuss the subjectivity of bias; argue that the notion
of ground-truth can be itself biased; provide source criticism and archival practices on bias
documentation; discuss data annotation; and present data types as a source of bias.</p>
      <p>Each of these NoBIAS subsections is summarized in the form of a set of challenges, policy
advice, and best practices aimed at all stakeholders. We present two of these below – Figures 2
and 3 – as representative examples. Each item listed in Figure 2 corresponds to a fair-AI topic
discussed within the subsection Legal challenges of bias in AI. Similarly, each item in Figure 3
corresponds to a topic discussed within the subsection Understanding bias. All items are
substantiated using the relevant fair-AI literature. Naturally, the choice of topic (i.e., item) was
conditioned by the works of the NoBIAS ESRs.</p>
      <p>
        Conclusion. In this work we provide a comprehensive introduction to the multidisciplinary and
growing fair-AI literature. Using the NoBIAS research project as a guide, we extend the current
discussion around the state-of-the-art by focusing on themes studied throughout the project.
Leveraging on the NoBIAS architecture (Figure 1), we dwell into ongoing fair-AI research topics,
position these topics within the EU regulatory framework, and provide best practices and policy
advice to the general practitioner (e.g., Figure 2). While we do not claim for their completeness,
we hope that the policy advice and best practices provided in this paper will contribute to the
conventional wisdom in research of and the ongoing discussion on managing bias and fairness
in AI. Please refer to Álvarez et al. [
        <xref ref-type="bibr" rid="ref1">1</xref>
        ] for a complete discussion.
      </p>
      <p>Acknowledgments. This work has received funding from the European Union’s Horizon
2020 research and innovation program under Marie Sklodowska-Curie Actions (grant agreement
number 860630) for the project "NoBIAS - Artificial Intelligence without Bias". This work reflects
only the authors’ views and the European Research Executive Agency (REA) is not responsible
for any use that may be made of the information it contains. Jose M. Alvarez and Salvatore
Ruggieri are also partially supported by the European Community H2020-EU.2.1.1 program
under the G.A. 952215 Tailor.
NeHuAI@ECAI, volume 2659 of CEUR Workshop Proceedings, CEUR-WS.org, 2020, pp.
3–10.
[11] D. A. Grimes, K. F. Schulz, Bias and causal associations in observational research, Lancet
359 (2002) 248–252.
[12] D. Danks, A. J. London, Algorithmic bias in autonomous systems, in: IJCAI, ijcai.org, 2017,
pp. 4691–4697.
[13] D. Pedreschi, S. Ruggieri, F. Turini, Discrimination-aware data mining, in: KDD, ACM,
2008, pp. 560–568.
[14] F. Kamiran, T. Calders, Classifying without discriminating, in: Int. Conference on
Computer, Control and Communication, IEEE, 2009, pp. 1–6.
[15] F. Kamiran, A. Karim, X. Zhang, Decision theory for discrimination-aware classification,
in: ICDM, IEEE Computer Society, 2012, pp. 924–929.
[16] S. Ruggieri, J. M. Álvarez, A. Pugnana, L. State, F. Turini, Can we trust fair-AI?, in: AAAI,</p>
      <p>AAAI Press, 2023, pp. 15421–15430.
[17] A. N. Carey, X. Wu, The statistical fairness field guide: Perspectives from social and formal
sciences, AI Ethics 3 (2023) 1–23.
[18] L. Weinberg, Rethinking fairness: An interdisciplinary survey of critiques of hegemonic</p>
      <p>ML fairness approaches, J. Artif. Intell. Res. 74 (2022) 75–109.
[19] K. Wagstaff, Machine learning that matters, in: ICML, icml.cc / Omnipress, 2012.
[20] B. D. Mittelstadt, S. Wachter, C. Russell, The unfairness of fair machine learning: Levelling
down and strict egalitarianism by default, CoRR abs/2302.02404 (2023).
[21] T. Scantamburlo, Non-empirical problems in fair machine learning, Ethics Inf. Technol. 23
(2021) 703–712.
[22] European Parliament, Council of the European Union, Regulation (EU) 2016/679 of the
European Parliament and of the Council of 27 April 2016 on the protection of natural
persons with regard to the processing of personal data and on the free movement of such
data, and repealing Directive 95/46/EC (General Data Protection Regulation), Official
Journal of the European Union L 119 (2016). URL: http://data.europa.eu/eli/reg/2016/679/oj.
[23] European Commission, Proposal for a Regulation of the European Parliament and of the
Council Laying down harmonised rules on Artificial Intelligence (AI Act) and amending
certain Union legislative acts, 2021. URL: https://eur-lex.europa.eu/legal-content/EN/TXT/
?uri=CELEX:52021PC0206.
[24] A. Castelnovo, R. Crupi, G. Greco, D. Regoli, I. G. Penco, A. C. Penco, A clarification of the
nuances in the fairness metrics landscape, Scientific Reports 12 (2022) 4209.
[25] N. Mehrabi, F. Morstatter, N. Saxena, K. Lerman, A. Galstyan, A survey on bias and fairness
in machine learning, ACM Comput. Surv. 54 (2021) 115:1–115:35.
[26] R. Berk, H. Heidari, S. Jabbari, M. Kearns, A. Roth, Fairness in criminal justice risk
assessments: The state of the art, Sociological Methods &amp; Research 50 (2021) 3–44.
[27] S. Verma, J. Rubin, Fairness definitions explained, in: FairWare@ICSE, ACM, 2018, pp.</p>
      <p>1–7.
[28] I. Zliobaite, Measuring discrimination in algorithmic decision making, Data Min. Knowl.</p>
      <p>Discov. 31 (2017) 1060–1089.
[29] S. Caton, C. Haas, Fairness in machine learning: A survey, ACM Comput. Surv. (2024) to
appear.
[30] M. S. A. Lee, L. Floridi, J. Singh, Formalising trade-offs beyond algorithmic fairness: lessons
from ethical philosophy and welfare economics, AI Ethics 1 (2021) 529–544.
[31] R. Binns, Fairness in machine learning: Lessons from political philosophy, in: FAT,
volume 81 of Proc. of Machine Learning Research, PMLR, 2018, pp. 149–159.
[32] A. Romei, S. Ruggieri, A multidisciplinary survey on discrimination analysis, Knowl. Eng.</p>
      <p>Rev. 29 (2014) 582–638.
[33] S. Fabbrizzi, S. Papadopoulos, E. Ntoutsi, I. Kompatsiaris, A survey on bias in visual
datasets, Comput. Vis. Image Underst. 223 (2022) 103552.
[34] J. Chen, H. Dong, X. Wang, F. Feng, M. Wang, X. He, Bias and debias in recommender
system: A survey and future directions, ACM Trans. Inf. Syst. 41 (2023) 67:1–67:39.
[35] T. Gebru, J. Morgenstern, B. Vecchione, J. W. Vaughan, H. M. Wallach, H. D. III, K. Crawford,</p>
      <p>Datasheets for datasets, Commun. ACM 64 (2021) 86–92.
[36] I. D. Raji, J. Yang, ABOUT ML: annotation and benchmarking on understanding and
transparency of machine learning lifecycles, CoRR abs/1912.06166 (2019).
[37] J. Stoyanovich, S. Abiteboul, B. Howe, H. V. Jagadish, S. Schelter, Responsible data
management, Commun. ACM 65 (2022) 64–74.
[38] I. D. Raji, A. Smart, R. N. White, M. Mitchell, T. Gebru, B. Hutchinson, J. Smith-Loud,
D. Theron, P. Barnes, Closing the AI accountability gap: defining an end-to-end framework
for internal algorithmic auditing, in: FAT*, ACM, 2020, pp. 33–44.
[39] I. Mendoza, L. A. Bygrave, The right not to be subject to automated decisions based on
profiling, EU Internet Law: Regulation and Enforcement (2017) 77–98.
[40] Article 29 Data Protection Working Party, Guidelines on automated individual
decision-making and profiling for the purposes of regulation 2016/679 (wp251rev.01), 2018. URL:
https://ec.europa.eu/newsroom/article29/items/612053.
[41] A. Balayn, S. Gürses, Beyond debiasing: Regulating AI and Its Inequalities, Technical</p>
      <p>Report, European Digital Rights (EDRi), 2021.
[42] A. Ferrara, L. E. Noboa, F. Karimi, C. Wagner, Link recommendations: Their impact on
network structure and minorities, in: WebSci, ACM, 2022, pp. 228–238.
[43] L. State, M. Fahimi, Careful explanations: A feminist perspective on XAI, in: EWAF,
volume 3442 of CEUR Workshop Proceedings, CEUR-WS.org, 2023.
[44] L. State, H. Salat, S. Rubrichi, Z. Smoreda, Explainability in practice: Estimating
electrification rates from mobile phone data in senegal, CoRR abs/2211.06277 (2022).
[45] P. R. Lobo, E. Daga, H. Alani, M. Fernández, Semantic web technologies and bias in</p>
      <p>Artificial Intelligence: A systematic literature review, Semantic Web 14 (2023) 745–770.
[46] C. Mougan, J. M. Álvarez, S. Ruggieri, S. Staab, Fairness implications of encoding protected
categorical attributes, in: AIES, ACM, 2023, pp. 454–465.
[47] K. M. Scott, S. M. Wang, M. Miceli, P. Delobelle, K. Sztandar-Sztanderska, B. Berendt,
Algorithmic tools in public employment services: Towards a jobseeker-centric perspective,
in: FAccT, ACM, 2022, pp. 2138–2148.
[48] J. M. Álvarez, K. M. Scott, B. Berendt, S. Ruggieri, Domain adaptive decision trees:
Implications for accuracy and fairness, in: FAccT, ACM, 2023, pp. 423–433.
[49] J. M. Álvarez, S. Ruggieri, Counterfactual situation testing: Uncovering discrimination
under fairness given the difference, in: EAAMO, ACM, 2023, pp. 2:1–2:11.</p>
    </sec>
  </body>
  <back>
    <ref-list>
      <ref id="ref1">
        <mixed-citation>
          [1]
          <string-name>
            <given-names>J. M.</given-names>
            <surname>Álvarez</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Bringas-Colmenarejo</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Elobaid</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Fabbrizzi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Fahimi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Ferrara</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Ghodsi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Mougan</surname>
          </string-name>
          ,
          <string-name>
            <surname>I. Papageorgiou</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P. R.</given-names>
            <surname>Lobo</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Russo</surname>
          </string-name>
          ,
          <string-name>
            <given-names>K. M.</given-names>
            <surname>Scott</surname>
          </string-name>
          ,
          <string-name>
            <given-names>L.</given-names>
            <surname>State</surname>
          </string-name>
          ,
          <string-name>
            <given-names>X.</given-names>
            <surname>Zhao</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Ruggieri</surname>
          </string-name>
          ,
          <article-title>Policy advice and best practices on bias and fairness in AI, Ethics Inf</article-title>
          . Technol.
          <volume>26</volume>
          (
          <year>2024</year>
          )
          <fpage>31</fpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref2">
        <mixed-citation>
          [2]
          <string-name>
            <given-names>R.</given-names>
            <surname>Shelby</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Rismani</surname>
          </string-name>
          ,
          <string-name>
            <given-names>K.</given-names>
            <surname>Henne</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Moon</surname>
          </string-name>
          ,
          <string-name>
            <given-names>N.</given-names>
            <surname>Rostamzadeh</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Nicholas</surname>
          </string-name>
          ,
          <string-name>
            <given-names>N.</given-names>
            <surname>Yilla-Akbari</surname>
          </string-name>
          ,
          <string-name>
            <given-names>J.</given-names>
            <surname>Gallegos</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Smart</surname>
          </string-name>
          , E. Garcia, G. Virk,
          <article-title>Sociotechnical harms of algorithmic systems: Scoping a taxonomy for harm reduction</article-title>
          , in: AIES, ACM,
          <year>2023</year>
          , p.
          <fpage>723</fpage>
          -
          <lpage>741</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref3">
        <mixed-citation>
          [3]
          <string-name>
            <given-names>K.</given-names>
            <surname>Martin</surname>
          </string-name>
          ,
          <article-title>Ethical implications and accountability of algorithms</article-title>
          ,
          <source>Journal of Business Ethics</source>
          <volume>160</volume>
          (
          <year>2019</year>
          )
          <fpage>835</fpage>
          -
          <lpage>850</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref4">
        <mixed-citation>
          [4]
          <string-name>
            <given-names>R.</given-names>
            <surname>Schwartz</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Vassilev</surname>
          </string-name>
          ,
          <string-name>
            <given-names>K.</given-names>
            <surname>Greene</surname>
          </string-name>
          ,
          <string-name>
            <given-names>L.</given-names>
            <surname>Perine</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Burt</surname>
          </string-name>
          , P. Hall,
          <article-title>Towards a Standard for Identifying and Managing Bias in Artificial Intelligence</article-title>
          ,
          <source>Technical Report 1270</source>
          , NIST Special Publication,
          <year>2022</year>
          .
        </mixed-citation>
      </ref>
      <ref id="ref5">
        <mixed-citation>
          [5]
          <string-name>
            <given-names>E.</given-names>
            <surname>Ntoutsi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Fafalios</surname>
          </string-name>
          ,
          <string-name>
            <given-names>U.</given-names>
            <surname>Gadiraju</surname>
          </string-name>
          ,
          <string-name>
            <given-names>V.</given-names>
            <surname>Iosifidis</surname>
          </string-name>
          ,
          <string-name>
            <given-names>W.</given-names>
            <surname>Nejdl</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Vidal</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Ruggieri</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Turini</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Papadopoulos</surname>
          </string-name>
          ,
          <string-name>
            <given-names>E.</given-names>
            <surname>Krasanakis</surname>
          </string-name>
          , I. Kompatsiaris,
          <string-name>
            <given-names>K.</given-names>
            <surname>Kinder-Kurlanda</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Wagner</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Karimi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Fernández</surname>
          </string-name>
          ,
          <string-name>
            <given-names>H.</given-names>
            <surname>Alani</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Berendt</surname>
          </string-name>
          ,
          <string-name>
            <given-names>T.</given-names>
            <surname>Kruegel</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Heinze</surname>
          </string-name>
          ,
          <string-name>
            <given-names>K.</given-names>
            <surname>Broelemann</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Kasneci</surname>
          </string-name>
          ,
          <string-name>
            <given-names>T.</given-names>
            <surname>Tiropanis</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Staab</surname>
          </string-name>
          ,
          <article-title>Bias in data-driven Artificial Intelligence systems - An introductory survey</article-title>
          ,
          <source>WIREs Data Mining Knowl. Discov</source>
          .
          <volume>10</volume>
          (
          <year>2020</year>
          ).
        </mixed-citation>
      </ref>
      <ref id="ref6">
        <mixed-citation>
          [6]
          <string-name>
            <given-names>B.</given-names>
            <surname>Hutchinson</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Mitchell</surname>
          </string-name>
          ,
          <article-title>50 years of test (un)fairness: Lessons for machine learning</article-title>
          ,
          <source>in: FAT, ACM</source>
          ,
          <year>2019</year>
          , pp.
          <fpage>49</fpage>
          -
          <lpage>58</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref7">
        <mixed-citation>
          [7]
          <string-name>
            <given-names>B.</given-names>
            <surname>Friedman</surname>
          </string-name>
          ,
          <string-name>
            <given-names>H.</given-names>
            <surname>Nissenbaum</surname>
          </string-name>
          ,
          <article-title>Bias in computer systems</article-title>
          ,
          <source>ACM Trans. Inf. Syst</source>
          .
          <volume>14</volume>
          (
          <year>1996</year>
          )
          <fpage>330</fpage>
          -
          <lpage>347</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref8">
        <mixed-citation>
          [8]
          <string-name>
            <given-names>S.</given-names>
            <surname>Lowry</surname>
          </string-name>
          ,
          <string-name>
            <given-names>G.</given-names>
            <surname>Macpherson</surname>
          </string-name>
          ,
          <article-title>A blot on the profession</article-title>
          ,
          <source>British Medical Journal</source>
          <volume>296</volume>
          (
          <year>1988</year>
          )
          <fpage>657</fpage>
          -
          <lpage>658</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref9">
        <mixed-citation>
          [9]
          <string-name>
            <given-names>M. G.</given-names>
            <surname>Haselton</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Nettle</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P. W.</given-names>
            <surname>Andrews</surname>
          </string-name>
          ,
          <article-title>The evolution of cognitive bias</article-title>
          , in:
          <string-name>
            <given-names>E. N.</given-names>
            <surname>Zalta</surname>
          </string-name>
          (Ed.),
          <source>Handbook of Evolutionary Psychology</source>
          , John Wiley &amp; Sons Inc.,
          <year>2005</year>
          , pp.
          <fpage>724</fpage>
          -
          <lpage>746</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref10">
        <mixed-citation>
          [10]
          <string-name>
            <given-names>T.</given-names>
            <surname>Hellström</surname>
          </string-name>
          ,
          <string-name>
            <given-names>V.</given-names>
            <surname>Dignum</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Bensch</surname>
          </string-name>
          ,
          <article-title>Bias in machine learning - what is it good for?</article-title>
          , in:
        </mixed-citation>
      </ref>
    </ref-list>
  </back>
</article>