<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">A Comprehensive Strategy to Bias and Mitigation in Human Resource Decision Systems</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Silvia</forename><surname>D'amicantonio</surname></persName>
							<email>silvia.damicantonio@mail.polimi.it</email>
							<affiliation key="aff0">
								<orgName type="institution">Otto von Guericke University Magdeburg</orgName>
								<address>
									<settlement>Magdeburg</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">Polytechnic University of Milan</orgName>
								<address>
									<settlement>Milan</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mishal</forename><forename type="middle">Kizhakkam</forename><surname>Kulangara</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Otto von Guericke University Magdeburg</orgName>
								<address>
									<settlement>Magdeburg</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Het</forename><surname>Darshan Mehta</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Otto von Guericke University Magdeburg</orgName>
								<address>
									<settlement>Magdeburg</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Shalini</forename><surname>Pal</surname></persName>
							<email>shalini.pal@st.ovgu.de</email>
							<affiliation key="aff0">
								<orgName type="institution">Otto von Guericke University Magdeburg</orgName>
								<address>
									<settlement>Magdeburg</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Marco</forename><surname>Levantesi</surname></persName>
							<email>marco.levantesi@ovgu.de</email>
							<affiliation key="aff0">
								<orgName type="institution">Otto von Guericke University Magdeburg</orgName>
								<address>
									<settlement>Magdeburg</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="institution">Leibniz Institute for Educational Media | Georg Eckert Institute</orgName>
								<address>
									<settlement>Brunswick</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Marco</forename><surname>Polignano</surname></persName>
							<email>marco.polignano@uniba.it</email>
							<affiliation key="aff3">
								<orgName type="institution">University of Bari Aldo Moro</orgName>
								<address>
									<settlement>Bari</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Erasmo</forename><surname>Purificato</surname></persName>
							<email>erasmo.purificato@acm.org</email>
							<affiliation key="aff4">
								<orgName type="department">Joint Research Centre</orgName>
								<orgName type="institution">European Commission</orgName>
								<address>
									<settlement>Ispra</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ernesto</forename><forename type="middle">William</forename><surname>De Luca</surname></persName>
							<email>deluca@ovgu.de</email>
							<affiliation key="aff0">
								<orgName type="institution">Otto von Guericke University Magdeburg</orgName>
								<address>
									<settlement>Magdeburg</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="institution">Leibniz Institute for Educational Media | Georg Eckert Institute</orgName>
								<address>
									<settlement>Brunswick</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">A Comprehensive Strategy to Bias and Mitigation in Human Resource Decision Systems</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">925B108BD6CDEE75287061084D9FA782</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T18:10+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Machine Learning</term>
					<term>Biases and Fairness</term>
					<term>Human Resource Decision-Making</term>
					<term>Mitigation Strategies</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>In recent years, Machine Learning (ML) and Artificial Intelligence (AI) models have become integral to various business operations, especially within Human Resource (HR) systems. These models are primarily used to automate decision-making processes in recruitment, performance assessment, and employee management, enhancing efficiency and streamlining tasks. However, the increasing use of these automated systems has raised significant concerns about the presence of bias, which can lead to discriminatory practices. Such biases may exclude qualified candidates and diminish opportunities, while also posing substantial risks to a company's reputation, with potential legal and ethical consequences. This paper addresses these challenges by exploring the root causes of bias in HR-related ML models and proposing best practices for mitigation. It presents a thorough examination of fairness concepts and definitions within the context of HR decision-making, emphasizing the complex nature of selecting appropriate mitigation techniques based on the specific models and datasets used. Through an empirical evaluation of various mitigation strategies, the study reveals that no single approach can fully satisfy all fairness metrics, highlighting the inherent trade-offs between accuracy and fairness. The findings offer valuable insights into optimizing these trade-offs and provide actionable recommendations for achieving fairer, unbiased outcomes in automated HR systems. Additionally, this research underscores the ongoing need for further study and discussion to enhance transparency and fairness in ML models, contributing to a more equitable HR landscape.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>The rise of Artificial Intelligence (AI) and Machine Learning (ML) has revolutionized numerous industries, with Human Resources (HR) being one of the most significantly impacted <ref type="bibr" target="#b1">[2,</ref><ref type="bibr" target="#b2">3,</ref><ref type="bibr" target="#b3">4]</ref>. Globally, companies are increasingly adopting these technologies to enhance decision-making capabilities and boost efficiency <ref type="bibr" target="#b4">[5]</ref>. A specific class of ML technologies, commonly referred to as Black Box models <ref type="bibr" target="#b5">[6,</ref><ref type="bibr" target="#b6">7,</ref><ref type="bibr" target="#b7">8]</ref>, is characterized by the opacity of their internal workings as they take inputs and produce output, but the decision-making process remains hidden. Major corporations such as Google, IBM, SAP, and Microsoft are already utilizing these algorithmic systems for automated HR management <ref type="bibr" target="#b8">[9]</ref>. Black Box models in HR streamline key functions such as recruitment and performance evaluation. The main drivers for their adoption include cost and time savings, increased productivity, and enhanced certainty in decision-making <ref type="bibr" target="#b9">[10,</ref><ref type="bibr" target="#b10">11]</ref>. AI is widely used to evaluate employee engagement and retention by analyzing feedback surveys and performance data. These insights are then applied to monitor achievement, recommend personalized job opportunities, and set objectives. Additionally, AI tools can assist in corrective actions for underperformance and inappropriate behaviours, and even support training by identifying employees likely to make errors and suggesting relevant skill-improvement programs. 
In recruitment, AI primarily contributes by screening resumes, identifying key terms in job applications, and analyzing video interviews to evaluate job fit and match candidates to open positions.</p><p>Although AI is often viewed as providing fairer, more impartial decision-making than humans, recent studies reveal a high risk of bias and discrimination in these systems <ref type="bibr" target="#b8">[9,</ref><ref type="bibr" target="#b11">12,</ref><ref type="bibr" target="#b12">13,</ref><ref type="bibr" target="#b13">14,</ref><ref type="bibr" target="#b14">15]</ref>. Bias can manifest in several ways during the implementation of decision-making algorithms. For instance, historical data used for training models may reflect past societal imbalances, resulting in these biases being reproduced in AI-driven decisions <ref type="bibr" target="#b15">[16]</ref>. The opaque nature of Black Box models exacerbates this issue, making bias identification and mitigation particularly challenging. The complexity of the underlying algorithms and deep learning techniques makes these models difficult to interpret. This lack of transparency poses ethical and legal risks, potentially leading to discriminatory hiring practices and damaging a company's reputation <ref type="bibr" target="#b8">[9]</ref>. In response, researchers and developers have proposed various strategies to address these biases, including using more diverse training datasets, implementing fairness-aware algorithms, and ensuring greater transparency and accountability in AI systems <ref type="bibr" target="#b16">[17,</ref><ref type="bibr" target="#b17">18]</ref>. While AI offers tremendous potential to enhance HR processes, it is essential to recognize and mitigate the biases these systems may introduce. Achieving fair and unbiased AI in HR requires a combination of better data practices, increased transparency, regulation, and continuous scrutiny and adjustment of AI models. 
This paper explores the inherent biases in HR-related Black Box models and outlines strategies for mitigating these biases to ensure fair and equitable decision-making.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Related Work</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.">Understanding Biases and Mitigation Techniques</head><p>To fully grasp the reasons behind biased algorithms, it is essential to first review the concept of bias. We refer to Cognitive Bias as the type of bias that can be introduced in hiring processes supported by AI <ref type="bibr" target="#b18">[19]</ref>. When the latter is used in hiring, the lack of transparency and accountability can heighten the risk of replicating social discrimination. The following subsections explore potential causes of bias and propose strategies to mitigate them.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.1.">Source and Implication of Bias</head><p>Cognitive bias, a well-documented phenomenon in human decision-making, can also affect AI-driven recruitment. Soleimani et al. <ref type="bibr" target="#b19">[20]</ref> identify two primary sources of bias in AI: the training dataset and the algorithm itself. Training datasets often contain historical data, which may include underrepresented or overrepresented groups. Furthermore, these datasets may encode biases related to sensitive attributes due to mislabeled data. This can result in the exclusion of highly qualified candidates or even lead to legal issues as a consequence of violating anti-discrimination laws <ref type="bibr" target="#b11">[12]</ref>  <ref type="bibr" target="#b20">[21]</ref>. Algorithmic biases can arise when developers make subjective assumptions or use inappropriate selection criteria. For example, including ethnicity, culture or gender in an algorithm can lead to wrong correlations between these attributes and the target variable <ref type="bibr" target="#b21">[22]</ref>. Finally, algorithms could fail to account for job-specific requirements and produce decisions that are misaligned with the actual needs of the position <ref type="bibr" target="#b11">[12]</ref>.  <ref type="bibr" target="#b19">[20]</ref>. The mitigation techniques are classified accordingly.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.2.">Mitigation Strategies</head><p>Bias mitigation can be addressed at multiple stages of AI tool development. First, ensuring that the training dataset is representative of the population is crucial. Data should be sourced from diverse demographic groups and regularly updated to prevent the perpetuation of historical biases. Vivek <ref type="bibr" target="#b22">[23]</ref> suggests that blind recruitment is an effective method for reducing unconscious bias. In the context of AI, blind hiring involves masking potentially bias-inducing variables from resumes in order to let the algorithm focus purely on skills and experience of candidates. Another key strategy for mitigating algorithmic bias is knowledge sharing between AI developers and HR professionals. Soleimani et al. <ref type="bibr" target="#b19">[20]</ref> demonstrate that exchanging information at different stages of development improves recruitment model performance. Finally, independent audits and periodic assessments are vital for detecting biases and ensuring that the algorithm remains fair over time <ref type="bibr" target="#b11">[12]</ref>. It is also suggested to release audit results since it can build trust with consumers and ensure transparency. In Table <ref type="table" target="#tab_0">1</ref> the potential sources of bias as well as their mitigation techniques are summarized.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.3.">Regulatory and Ethical Considerations</head><p>The rapid expansion of automated decision-making systems has highlighted the need for government regulation to ensure fairness for all individuals. Many countries have enacted laws to prevent discrimination based on ethnicity, gender, religion or nationality <ref type="bibr" target="#b23">[24]</ref>. In the context of employment, the EU AI Act (Annex III: Article 6(2)) classifies AI systems used in recruitment, employee management, and termination as high-risk. As per the law, systems must be regulated to ensure fairness, transparency, and non-discrimination in hiring and workplace decisions, thus minimizing bias to protect individuals' rights <ref type="bibr" target="#b24">[25]</ref>. Additionally, the U.S. Equal Employment Opportunity Commission (EEOC) has established guidelines, like the usage of the four-fifths rule (Table <ref type="table" target="#tab_2">2</ref>), to promote equal employment opportunities and prevent bias during the hiring process <ref type="bibr" target="#b25">[26]</ref>. Generally speaking, three main theories of discrimination are often used to analyze bias:</p><p>• Disparate Treatment: Refers to intentional discrimination based on protected characteristics <ref type="bibr" target="#b26">[27,</ref><ref type="bibr" target="#b25">26]</ref>. Using sensitive attributes to build the model can prevent unfairness but it could also violate anti-discrimination laws and produce disparate treatment <ref type="bibr" target="#b26">[27]</ref>. • Disparate Impact: Addresses unintentional discrimination, where proxy (not explicitly sensitive) attributes lead to disproportionate negative outcomes for a protected group <ref type="bibr" target="#b26">[27]</ref>. 
• Disparate Mistreatment: Focuses on differences in misclassification rates between groups based on sensitive attributes, considering false positive and false negative rates when evaluating fairness <ref type="bibr" target="#b27">[28]</ref>.</p><p>The distinction between disparate impact and disparate mistreatment is important. In cases where ground truth data is unavailable and historical data is unreliable, disparate mistreatment may not be suitable due to difficulty in distinguishing between correct and incorrect classifications. On the other hand, when ground truth data is available, focusing on disparate impact may lead to reverse discrimination <ref type="bibr" target="#b27">[28]</ref>. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">Fairness Metrics</head><p>Several fairness metrics have been proposed to assess the fairness of decision-making systems. This section highlights some of the most widely discussed metrics, which are generally categorized into two main types: Individual Fairness and Group Fairness <ref type="bibr" target="#b29">[29,</ref><ref type="bibr" target="#b30">30]</ref>. Individual Fairness refers to ensuring that predictions are fair for each individual, whereas Group Fairness focuses on equal treatment of groups with different values for sensitive attributes.</p><p>To define these metrics, the following notation is introduced:</p><p>• 𝑋 : Input feature vector of applicants, excluding sensitive attributes.</p><p>• 𝐴 : Sensitive attributes (e.g., race, gender).</p><p>• 𝐶 : Binary classifier mapping 𝑋 and 𝐴 to a prediction 𝐶.</p><p>• 𝑌 : The actual outcome of the model.</p><p>Hence, the probability to observe an event 𝐸 given that the attribute 𝐴 has assumed value 𝑎 is:</p><formula xml:id="formula_0">𝑃 𝑎 (𝐸) = 𝑃 (𝐸|𝐴 = 𝑎)<label>(1)</label></formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.1.">Individual Fairness</head><p>Fairness through unawareness: An algorithm can produce fair outcomes by excluding all sensitive attributes from the input feature vector, preventing the system from relying on these attributes to make a decision <ref type="bibr" target="#b16">[17,</ref><ref type="bibr" target="#b17">18]</ref>. Thus, the final outcome can be defined as</p><formula xml:id="formula_1">𝐶 = 𝐶(𝑋, 𝐴) = 𝐶(𝑋)<label>(2)</label></formula><p>potential issue −→ Attributes that are correlated with sensitive information (proxies) may still lead to biased outcomes.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Fairness through awareness:</head><p>In this approach, an algorithm is considered fair if it produces similar outcomes for similar individuals. Specifically, if two applicants have similar feature vectors, the probability distributions of their predicted outcomes should also be similar, assuming a small similarity metric 𝑑(𝑖, 𝑗) <ref type="bibr" target="#b17">[18,</ref><ref type="bibr" target="#b30">30]</ref>:</p><formula xml:id="formula_2">𝐶(𝑋 𝑖 , 𝐴 𝑖 ) ≈ 𝐶(𝑋 𝑗 , 𝐴 𝑗 )<label>(3)</label></formula><p>Where:</p><p>• 𝑋 𝑖 and 𝐴 𝑖 are the feature vectors of applicant 𝑖.</p><p>• 𝑋 𝑗 and 𝐴 𝑗 are the feature vectors of applicant 𝑗.</p><p>Counterfactual Fairness: A model is counterfactually fair if the prediction for an individual remains the same in both the real world and in a counterfactual world where the individual belongs to a different demographic group <ref type="bibr" target="#b30">[30]</ref>. The causal relationship between 𝑋 and 𝐴 must be such that, if 𝐴 changes from 𝑎 to 𝑎 ′ then 𝑋 changes from 𝑥 to 𝑥 ′ . The model is counterfactually fair if:</p><formula xml:id="formula_3">𝑃 (𝐶(𝑥, 𝑎) = 𝑐|𝑋 = 𝑥, 𝐴 = 𝑎) = 𝑃 (𝐶(𝑥, 𝑎 ′ ) = 𝑐|𝑋 = 𝑥, 𝐴 = 𝑎 ′ )<label>(4)</label></formula><p>for all 𝑐 and any value of 𝑎 ′ attainable by 𝐴 <ref type="bibr" target="#b31">[31]</ref>. Figure <ref type="figure" target="#fig_0">1</ref> illustrates a causal graph in a hiring scenario. The sensitive attribute, Gender (G), is derived from Years of Experience (proxy), which directly influences the outcome (Hired/Not Hired). This setup would not be counterfactually fair, as the proxy influences the outcome <ref type="bibr" target="#b32">[32]</ref>. To avoid proxy discrimination, there should be no proxy connections between the sensitive attribute and the outcome <ref type="bibr" target="#b33">[33]</ref>. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.2.">Group Fairness</head><p>Demographic parity: Also called statistical parity, this metric ensures that the acceptance probability is the same (or within a given percentage) across groups <ref type="bibr" target="#b25">[26,</ref><ref type="bibr" target="#b32">32]</ref>. For some tolerance</p><formula xml:id="formula_4">𝜖 = 𝑝 100 ∈ [0, 1], |𝑃 𝑎 (𝐶 = 1) − 𝑃 𝑏 (𝐶 = 1)| ≤ 𝜖<label>(5)</label></formula><p>Equalized Odds: This metric requires that both protected and unprotected groups have equal True Positive and False Positive rates <ref type="bibr" target="#b29">[29,</ref><ref type="bibr" target="#b34">34]</ref>:</p><formula xml:id="formula_5">𝑃 (𝐶 = 1 | 𝐴 = 0, 𝑌 = 𝑦) = 𝑃 (𝐶 = 1 | 𝐴 = 1, 𝑌 = 𝑦), 𝑦 ∈ {0, 1}<label>(6)</label></formula><p>Here, 𝐶 and 𝐴 are independent conditional on 𝑌 .</p><p>Equal Opportunity: This requires that protected and unprotected groups have equal True Positive Rates, focusing on fair positive outcomes <ref type="bibr" target="#b29">[29,</ref><ref type="bibr" target="#b34">34]</ref>:</p><formula xml:id="formula_6">𝑃 𝑎 (𝐶 = 1 | 𝑌 = 1) = 𝑃 𝑏 (𝐶 = 1 | 𝑌 = 1)<label>(7)</label></formula><p>Overall Accuracy Equality: This metric ensures that the prediction accuracy is the same across groups. In an HR context, it ensures that highly qualified and underqualified applicants are treated equally in both protected and unprotected groups <ref type="bibr" target="#b32">[32,</ref><ref type="bibr" target="#b34">34]</ref>:</p><formula xml:id="formula_7">𝑃 𝑎 (𝐶 = 𝑌 ) = 𝑃 𝑏 (𝐶 = 𝑌 )<label>(8)</label></formula><p>Predictive Rate Parity (Sufficiency): This condition is met when the Positive Predictive Value (PPV) and Negative Predictive Value (NPV) are equal for both protected and unprotected groups. It helps prevent disparate mistreatment and promotes fairness <ref type="bibr" target="#b27">[28]</ref>. 
Specifically, it ensures:</p><formula xml:id="formula_8">𝑃 𝑎 (𝑌 = 1|𝐶 = 1) = 𝑃 𝑏 (𝑌 = 1|𝐶 = 1) (1)<label>(9)</label></formula><p>And for Negative Predictive Value:</p><formula xml:id="formula_9">𝑃 𝑎 (𝑌 = 0|𝐶 = 0) = 𝑃 𝑏 (𝑌 = 0|𝐶 = 0) (2)<label>(10)</label></formula><p>A classifier satisfies Predictive Rate Parity if both conditions (1) and (2) are met <ref type="bibr" target="#b25">[26]</ref>.</p><p>Treatment Equality: A classifier satisfies this condition when the ratio of False Positives and False Negatives is equal across groups <ref type="bibr" target="#b32">[32]</ref>:</p><formula xml:id="formula_10">𝑃 𝑎 (𝐶 = 1 | 𝑌 = 0) 𝑃 𝑎 (𝐶 = 0 | 𝑌 = 1) = 𝑃 𝑏 (𝐶 = 1 | 𝑌 = 0) 𝑃 𝑏 (𝐶 = 0 | 𝑌 = 1)<label>(11)</label></formula><p>Fairness Metrics</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Advantages Disadvantages</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Fairness through unawareness</head><p>Straightforward solution by avoiding explicit use of sensitive attributes Does not consider the correlation between sensitive and non-sensitive attributes</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Fairness through awareness</head><p>Considers both the similarity of individuals and the similarity of outcome distributions. Flexible similarity definition for different scenarios.</p><p>Choice of distance metrics can impact results and may require fine-tuning. Sensitivity to the definition of similarity, which can vary across scenarios.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Counterfactual fairness</head><p>Considers the impact of changes in sensitive attributes on both non-sensitive features and predicted outcomes.</p><p>Requires prior knowledge of causal relationships between sensitive and non-sensitive attributes. Practical implementation may be challenging when causal relationships are complex.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Demographic Parity</head><p>Promotes fair representation of all demographic groups.</p><p>Rules out an accurate classifier (𝐶 = 𝑌 ), considering it unfair when the base rates of the two groups are significantly different. 𝑃 𝑎 (𝐶 = 1) ̸ = 𝑃 𝑏 (𝐶 = 1).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Overall Accuracy Equality</head><p>Ensures overall accuracy is consistent across different groups. Easy to implement.</p><p>Heavily dependent on the error type. It allows you to make up for rejecting qualified members of one group by accepting unqualified members of another group.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Equalized odds</head><p>Considers both positive and negative predictive performance. Addresses potential disparities in error rates between groups.</p><p>May be sensitive to class imbalances and prevalence differences.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Equal opportunity</head><p>Emphasizes equal opportunities for positive outcomes.</p><p>Does not consider false positive rates, potentially overlooking negative consequences. Similar to equalized odds, may face challenges in practical implementation.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Table 3</head><p>Advantages and Disadvantages of Different Fairness Definitions <ref type="bibr" target="#b17">[18,</ref><ref type="bibr" target="#b29">29]</ref> </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3.">Mitigation Mechanisms</head><p>Selecting the appropriate fairness metric for a model requires careful consideration of the legal, ethical, and social implications <ref type="bibr" target="#b35">[35]</ref>. As discussed earlier, different fairness metrics offer distinct advantages and disadvantages (see Table <ref type="table">3</ref>). Recent research has demonstrated that it is impossible to satisfy multiple fairness notions simultaneously, creating a challenge in achieving balanced outcomes <ref type="bibr" target="#b23">[24]</ref>.</p><p>A key issue is the trade-off between fairness and accuracy. Incorporating fairness as an objective can reduce accuracy, as the focus shifts from purely optimizing prediction accuracy to balancing it with fairness concerns <ref type="bibr" target="#b34">[34]</ref>. This creates a need for an established trade-off. Bias mitigation algorithms are designed to balance the dual objectives of maintaining model accuracy and ensuring fairness <ref type="bibr" target="#b42">[42]</ref>. These approaches can generally be classified into three categories: pre-processing, in-processing, and post-processing techniques. Pre-Processing Mechanisms: Pre-processing techniques involve altering the input data to eliminate bias before training the classifier. This approach is useful when the algorithm is allowed to modify the training data <ref type="bibr" target="#b29">[29]</ref>. Strategies include removing sensitive attributes, adjusting the labels of instances near decision boundaries (as these are more susceptible to discrimination), and applying reweighing techniques to correct imbalances. 
Recent approaches suggest altering feature representations in a way that reduces bias without altering the core model <ref type="bibr" target="#b35">[35]</ref>.</p><p>In-Processing Mechanisms: In-processing techniques aim to modify the learning algorithm itself to reduce discrimination during model training, while keeping the original training data unchanged <ref type="bibr" target="#b29">[29]</ref>. This can be achieved by introducing a regularization term to the objective function, which penalizes the mutual information between sensitive attributes and predicted outcomes. Alternatively, constraints can be added to ensure that the model satisfies fairness metrics like equalized odds or reduces disparate impact <ref type="bibr" target="#b27">[28,</ref><ref type="bibr" target="#b35">35]</ref>.</p><p>Post-Processing Mechanisms: Post-processing methods adjust the predictions of a trained model to meet fairness criteria, without modifying the model or the training data <ref type="bibr" target="#b29">[29]</ref>. These approaches are useful when the algorithm can only manipulate the learned model. For instance, some methods adjust the labels predicted by the black-box model using a fairness-driven function. Various studies propose techniques that improve equalized odds or equal opportunity by modifying the outcomes after training <ref type="bibr" target="#b34">[34]</ref>. Additionally, it is often suggested to set different thresholds for different groups in a way that both maximizes accuracy and minimizes demographic disparities <ref type="bibr" target="#b35">[35,</ref><ref type="bibr" target="#b43">43]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Study Methodology</head><p>In this section, we address the bias methodologies used to answer our study's research aim. Our goal is to represent an HR decision system as a graph architecture to empirically evaluate the fairness notions of the predicted outcomes. Additionally, we explore the different standard mitigation techniques to generate results with optimum fairness and accuracy.</p><p>In HR systems, decision-making is inherently comparative, requiring the evaluation of multiple candidates to identify the best fit. Given this complexity, Graph Neural Network (GNN) models are well-suited for such tasks. GNNs excel in scenarios where features exhibit intricate relationships and varying degrees of correlation, which significantly influence prediction outcomes. Indeed, our method involved the evaluation of the effectiveness of three GNN architectures: Graph Convolutional Networks (GCN), Graph Attention Networks (GAT) and Graph Isomorphism Networks (GIN).</p><p>Each GNN model underwent all three phases of bias mitigation: pre-processing techniques, in-processing, and post-processing. Bias was mitigated using appropriate algorithms at each stage. The model outcomes were evaluated against four key fairness metrics: statistical parity difference (SPD), equal opportunity difference (EOD), overall accuracy equality difference (OAED), and treatment equality difference (TED) <ref type="foot" target="#foot_0">1</ref> . By applying these methods to a relevant dataset, we aim to address key experimental questions -how different GNN architectures perform in reducing bias while maintaining predictive accuracy, and what trade-offs arise between fairness and accuracy when various bias mitigation strategies are applied. The findings are discussed in the next section and they disclose how various GNN designs manage and influence such trade-off.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.">Data Collection and Pre-Processing</head><p>For this study, the Adult dataset <ref type="bibr" target="#b44">[44]</ref> from the UCI repository has been used. It predicts whether a person's annual income exceeds $50,000. It is a widely recognised dataset containing around 40,000 instances and 16 attributes, plus a target variable (income), collected from the 1994 United States Census. The sensitive attribute in this study was gender, and the target variable was annual income. The income data was converted into binary values as follows:</p><formula xml:id="formula_11">𝑖𝑛𝑐𝑜𝑚𝑒 &gt; 50𝐾 −→ 𝑖𝑛𝑐𝑜𝑚𝑒 = 1 𝑖𝑛𝑐𝑜𝑚𝑒 ≤ 50𝐾 −→ 𝑖𝑛𝑐𝑜𝑚𝑒 = 0</formula><p>The data was sourced from the Center for Machine Learning and Intelligent Systems at the University of California, Irvine, and is available as a comma-separated values (CSV) file. Since GNNs require data to be in graph form, the K-Nearest Neighbors Graph (K-NNG) method was employed to convert the dataset into a graph structure. K-NNG connects each entity with its 𝑘 most similar neighbors based on a similarity metric. This technique was chosen due to the high density of connections in the dataset, allowing the K-NNG to produce sparse graphs with fewer edges, thereby improving computational efficiency compared to fully connected graphs <ref type="bibr" target="#b45">[45]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2.">Mitigation Mechanisms: A Comparative Analysis</head><p>As previously mentioned, bias mitigation can occur at various stages in model development. These stages include pre-processing (modifying data before training), in-processing (adjusting the training process), and post-processing (modifying outcomes post-training). Each mitigation strategy presents distinct advantages and disadvantages, which are summarized in Table <ref type="table">4</ref>. In our experimental protocol</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Mechanism</head><p>Advantages Disadvantages</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Pre-Processing Mechanisms</head><p>Pre-processed data can be used for any downstream task.No need to modify classifier. No need to access sensitive attributes at test time.</p><p>Mostly used for optimizing before training.May not be able to support all fairness metrics (Statistical Parity or Individual Fairness) due to unavailability of label 𝑌 . Compared to the other two methods does not perform well on accuracy and fairness measures.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>In-Processing Mechanisms</head><p>Good performance on accuracy and fairness measures. Higher flexibility to choose the trade-off between accuracy and fairness measures (depends on specific algorithm). No need to access sensitive attributes at test time.</p><p>Methods are task-specific. Do not generalize well across scenarios. Modification of the classifier might not always be feasible.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Post-Processing Mechanisms</head><p>Highly adaptable as can be applied after training the classifier. Results in relatively good performance on fairness measures.</p><p>No need to modify classifier, simplifying implementation.</p><p>Need to access protected attributes during the testing phase. Lack the flexibility of picking any accuracy-fairness trade-off.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Table 4</head><p>Beside selecting the criterion to measure fairness, it is also need to choose the step in the workflow of a machine learning process in which to apply bias mitigation algorithms. In the table fairness mechanisms are classified conventionally into three categories pre-processing, in-processing and post-processing. Their respective advantages and disadvantages are also outlined <ref type="bibr" target="#b35">[35,</ref><ref type="bibr" target="#b46">46]</ref>.</p><p>we employed various algorithms to mitigate bias, each corresponding to a different mitigation phase as detailed in Table <ref type="table">5</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Mitigation Mechanisms Algorithms Pre-processing Reweighing</head><p>In-processing Prejudice Remover Regularizer Rich Subgroup Fairness Post-processing Reject Option Classification</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Table 5</head><p>Algorithms used for each mitigation phase in this study.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3.">Mitigation Algorithms</head><p>Reweighing -Reweighing is a pre-processing technique designed to adjust the weights of instances in the dataset to mitigate bias. It does so without relabeling the data. For example, features where sensitive attribute 𝑎 is in the positive class receive higher weights than those in the negative class, and vice versa for attribute 𝑏 <ref type="bibr" target="#b47">[47]</ref>. By adjusting the weights, this method seeks to achieve fairness between protected and unprotected groups <ref type="bibr" target="#b40">[40]</ref>. It is the most ideal algorithm for skewed and imbalanced datasets, which is one of the main causes of bias in HR domain. So, Reweighing was an obvious choice to readjust the representation to balance the different groups in the data, increasing the learning opportunity for the models.</p><p>Prejudice Remover Regularizer (PRR) -Prejudice Remover Regularizer is an in-processing technique that introduces a regularization term into the log-likelihood loss function of a classifier. This term penalises discrimination based on sensitive attributes <ref type="bibr" target="#b48">[48]</ref>. For HR decision systems, along with fairness, accurate decisions are of primordial importance. Thus, this regularisation term is leveraged as a hyperparameter, which is used to control the degree of penalisation, allowing the model to balance accuracy with fairness <ref type="bibr" target="#b49">[49]</ref>.</p><p>Rich Subgroup Fairness (RSF) -Rich Subgroup Fairness aims to go beyond traditional fairness metrics, which may only evaluate fairness across broad categories such as gender. These broader metrics may overlook biases affecting specific subgroups, such as certain gender-ethnicity intersections. RSF mitigates this by considering finer-grained intersections of various attributes, thereby identifying and addressing biases against more specific protected subgroups. 
Using this algorithm, it is evaluated if the prediction accuracy across all groups is equal, enforcing the matching representation of false positives and false negatives across all groups <ref type="bibr" target="#b51">[50]</ref>.</p><p>Reject Option Classification (ROC) -Reject Option Classification is a post-processing method that works by adjusting predictions in the low-confidence regions of a probabilistic classifier. This approach reduces discrimination by selectively changing the classification of instances from both protected and unprotected groups. The ROC algorithm uses a variety of parameters, including classification thresholds and fairness metrics, to improve fairness <ref type="bibr" target="#b52">[51]</ref>. The process involves swapping predictions (e.g., changing false negatives to true positives) to minimise unfair treatment of different demographic groups. It finds the best confidence bound by itself <ref type="bibr" target="#b47">[47]</ref>. While it is effective at reducing bias, ROC is computationally expensive due to the complexity of tuning multiple parameters. Moreover, the algorithm can slightly reduce the accuracy of the unprotected group while increasing it for the protected group <ref type="bibr" target="#b47">[47]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Experimental Protocol and Discussion of Results</head><p>We organize our experimental runs as following:</p><p>1. As first step we evaluate GCN, GAT, and GIN neural networks on Adult dataset before applying any mitigation technique. 2. Then we evaluate GCN, GAT, and GIN neural networks on Adult dataset after applying mitigation techniques (Reweighing, Prejudice Remover Regularizer, Rich Subgroup Fairness, and Reject Option Classifier). 3. Finally we compared the obtained results and we infer some consideration about their efficacy considering four fairness metrics: Statistical Parity Difference (SPD), Equal Opportunity Difference (EOD), Overall Accuracy Equality Difference (OAED), and Treatment Equality Difference (TED).</p><p>Baseline Performance (Pre-Mitigation) As it is possible to observe in Table <ref type="table" target="#tab_4">6</ref>, all three GNN models showed high accuracy, with GCN achieving slightly better results (approx 85%) than GAT and GIN (approx 84%). This aligns with existing literature that emphasizes GCN's superior ability to aggregate neighborhood information effectively, thus enhancing predictive accuracy <ref type="bibr" target="#b53">[52,</ref><ref type="bibr" target="#b54">53]</ref>. However, despite the strong overall accuracy, fairness metrics shown a different picture. GIN exhibited the largest Equal Opportunity Difference (EOD), highlighting its significant bias in terms of true positive rates across protected and unprotected groups. GCN showed a relatively high Treatment Equality Difference (TED), indicating a bias in error rates between demographic groups. These preliminary results reveal a critical insight: while GNNs perform well in terms of accuracy, they exhibit inherent biases, thus necessitating bias mitigation techniques.  
The effectiveness of the bias mitigation strategies, measured in terms of their ability to address fairness concerns without excessively compromising accuracy, is summarized in Table <ref type="table" target="#tab_5">7</ref> and Figures <ref type="figure" target="#fig_1">2a  and 2d</ref>. The following paragraphs analyze each technique's impact.</p><p>Reweighing This pre-processing method, demonstrated notable improvements in fairness metrics but at the cost of a drop in predictive accuracy. Focusing on the GCN architecture, the model's accuracy fell from approx 85% to 78.50%. This reduction is expected, as reweighing tends to penalize the majority class to promote fairness, reducing overall accuracy. On the contrary, SPD seems to be improved fom 0.0098 to 0.0090, reflecting a more balanced distribution of outcomes across groups, and OAED also decreased from 0.0184 to 0.0167, suggesting reduced disparities in identifying positive cases. However, as shown in Figure <ref type="figure" target="#fig_1">2c</ref>, Treatment Equality Difference (TED) increased to 0.1010, suggesting a trade-off. This is a typical side-effect of reweighing, where fairness in positive outcomes can lead to greater disparities in misclassification rates, particularly in error rates across demographic groups. Similar consideration can be made for GAT, although in this case SPD registered a higher value after applying the mitigation technique. Finally, for GIN, only SPD showed a decrease from the raw model suggesting that Reweighing might not be the best technique to use with this type of GNN. 
Overall this result underscores the well-documented tension between fairness and accuracy: while reweighing can improve outcome parity, it does so at the expense of treatment equality and overall accuracy.</p><p>Prejudice Remover Regularizer (PRR) Prejudice Remover Regularizer, an in-processing technique, showed small variations in accuracy with two models (GCN and GAT) actually performing better, demonstrating its ability to maintain predictive performance. However, every model registered an improvement in only one technique each. For instance, GCN successfully managed to reduce TED (from 0.03 to 0.017) but failed at satisfying the other metrics. The same holds for GAT and GIN which were able to improve the value of OAED and SPD respectively but didn't succeed at enhancing the rest of the metrics. Although PRR did reduce some of the bias present in the model's predictions, its performance, with respect to the four metrics, strongly depended on the type of GNN used. Moreover, it was not effective in addressing outcome disparities measured by EOD.</p><p>Rich Subgroup Fairness (RSF) Rich Subgroup Fairness leads to different impact on model performance and fairness metrics due to its specific approach for handling subgroup disparities. The test accuracy of 84.51 % is nearly identical to the original GCN model's accuracy, which is 84.49 %,same pattern is observed for GAT and GIN maintaining the accuracy at 82.77 % and 84.28 % respectively. It can be observed that SPD increases slightly to 0.0128 for GCN and 0.0050 for GAT, indicating a minor increase in bias regarding the distribution of favorable outcomes across genders. We can observe that RSF aims to be fair across subgroups but not fully able to eliminate all the biases for the GCN and GAT model. 
On the other hand, RSF worked best when applied to the GIN model compared with GCN and GAT, since we observed a decrease in the values of SPD, EOD and OAED, while TED remained almost the same, suggesting that RSF maintained overall predictive performance while addressing fairness.
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Reject Option Classification (ROC)</head><p>The Reject Option Classification emerged as one of the most effective post-processing technique in improving accuracy to almost 89% for all three models. SPD dropped to 0.0012 and 0.0011 for GCN and GAT respectively, indicating near-perfect balance in the distribution of favorable outcomes between groups. TED also improved, decreasing to 0.0134 for GCN and 0.0044 for GAT, hence reflecting more balanced error rates across groups.</p><p>Also, OAED was successfully improved for GIN while ROC failed at mitigating bias arising from differences in the true positive rates for protected and unprotected groups.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Conclusion</head><p>The results presented in this section confirm the hypothesis posed in our research question: bias mitigation techniques do help reduce bias in GNN architectures, but the trade-off between fairness and accuracy is inevitable. Each technique exhibited distinct strengths and weaknesses, depending on the GNN model it was applied to.</p><p>The GAT architecture, combined with the ROC algorithm, produces the best results, offering an optimized balance between accuracy and fairness. This outcome is expected, as it involves prediction swapping and numerous parameters, which also makes it computationally intensive. While not the top performer, PRR and RSF consistently maintain accuracy across all GNN models, achieving an effective trade-off, particularly when used with GAT. Among the fairness metrics, Treatment Equality showed the most improvement following the application of mitigation techniques, promoting equal error rates across all groups. PRR was the least effective in enhancing fairness metrics, indicating that a standard approach like this, which adjusts representation, struggles to improve fairness in complex real-world data.</p><p>These findings suggest that no single mitigation technique universally outperforms the others in all fairness metrics, and that careful consideration must be given when selecting the appropriate technique based on the specific fairness requirements and constraints of the task at hand.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Future Directions</head><p>The growing body of research in fairness and bias mitigation within machine learning underscores the importance of continued investigation, especially as AI systems increasingly influence social and organizational decision-making. Future work should focus on:</p><p>• Exploring more complex and realistic datasets that encompass multiple sensitive attributes, offering a richer and more representative testing environment. • Expanding the use of alternative GNN models, as variations in model architectures may yield better performance in fairness optimization. • Improving model transparency and interpretability, which will be crucial for building trust in AI-driven HR systems and ensuring these systems are accountable for their decisions.</p><p>Such advancements will enable more refined bias mitigation techniques and foster collaboration between researchers and practitioners to create fairer, more equitable machine learning systems for real-world applications.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: A causal graph representing employee recruitment with G as a sensitive attribute, Years of Experience as a proxy attribute and GPA as a resolving attribute.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Graph with Left y-axis: fairness evaluation for different models and different techniques. Right y-axis: accuracy of the models. Statistical Parity (2a), Equal Opportunity (2b), Overall Accuracy Equality (2c) and Treatment Equality (2d).</figDesc><graphic coords="10,72.00,581.93,225.62,112.81" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>Table 1</head><label>1</label><figDesc>Sources of Bias and Mitigation techniques identified in our study. The sources are divided into arising from polluted training dataset or improper algorithm</figDesc><table><row><cell></cell><cell>Source of Bias</cell><cell>Mitigation Best Practices</cell></row><row><cell>Dataset</cell><cell>Training dataset non-representative Training dataset out of date</cell><cell>-Expand datasets sources -Keep datasets up to date -Blind recruitment</cell></row><row><cell>Algorithm</cell><cell>Unable to formulate assumptions Unable to account for context-specific requirements</cell><cell>-Knowledge Sharing -Third parties Audits</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head>Applicants Hired Selection Rate Percent Hired 80</head><label></label><figDesc></figDesc><table><row><cell>White</cell><cell>48</cell><cell>48/80</cell><cell>60%</cell></row><row><cell>40 Black</cell><cell>12</cell><cell>12/40</cell><cell>30%</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head>Table 2</head><label>2</label><figDesc>Example from EEOC guidelines<ref type="bibr" target="#b25">[26]</ref>. The four-fifths rule requires that the selection rate for any protected group should be at least 80% of the highest selection rate among groups. In this case, the highest selection rate is 60%, hence for the other group, i.e. Black, the selection rate should be at least 48%.</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_3"><head></head><label></label><figDesc>These strategies can be applied at different stages of model development. Several methods for bias mitigation have been examined, including the work of Calders and Verwer (2010) [36], Chouldechova (2017) [37], Feldman et al. (2015) [38], Hardy et al. (2016) [39], Kamiran and Calders (2012) [40], Zafar et al. (2017) [41], and Zhang et al. (2018)</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_4"><head>Table 6</head><label>6</label><figDesc>Results before applying Mitigation Techniques. Upward facing arrow (↑) indicates that higher values are better, whereas downward facing arrows (↓) indicate lower values are better.</figDesc><table><row><cell></cell><cell cols="3">Model Test Accuracy (↑) SPD (↓)</cell><cell>EOD (↓)</cell><cell cols="3">OAED (↓) TED (↓)</cell></row><row><cell></cell><cell>GCN</cell><cell>0.8449</cell><cell>0.0098</cell><cell>0.0040</cell><cell cols="2">0.0184</cell><cell>0.0303</cell></row><row><cell></cell><cell>GAT</cell><cell>0.8293</cell><cell>0.0020</cell><cell>0.0028</cell><cell cols="2">0.0067</cell><cell>0.0064</cell></row><row><cell></cell><cell>GIN</cell><cell>0.8429</cell><cell>0.0079</cell><cell>0.0341</cell><cell cols="2">0.0349</cell><cell>0.0083</cell></row><row><cell cols="3">Model Mitigation Technique</cell><cell cols="3">Test Accuracy (↑) SPD (↓)</cell><cell cols="2">EOD (↓)</cell><cell>OAED (↓) TED (↓)</cell></row><row><cell></cell><cell>Reweighing</cell><cell></cell><cell>0.7850</cell><cell>0.0090</cell><cell></cell><cell>0.0212</cell><cell>0.0167</cell><cell>0.1010</cell></row><row><cell>GCN</cell><cell cols="2">Prejudice Remover Regularizer Rich Subgroup Fairness</cell><cell>0.8452 0.8451</cell><cell>0.0160 0.0128</cell><cell></cell><cell>0.0484 0.0334</cell><cell>0.0312 0.0472</cell><cell>0.0170 0.0177</cell></row><row><cell></cell><cell cols="2">Reject Option Classification</cell><cell>0.8826</cell><cell>0.0012</cell><cell></cell><cell>0.0081</cell><cell>0.0301</cell><cell>0.0134</cell></row><row><cell></cell><cell>Reweighing</cell><cell></cell><cell>0.7020</cell><cell>0.0044</cell><cell></cell><cell>0.0081</cell><cell>0.0055</cell><cell>0.0259</cell></row><row><cell>GAT</cell><cell cols="2">Prejudice Remover Regularizer Rich Subgroup Fairness</cell><cell>0.8285 0.8277</cell><cell>0.0066 0.0050</cell><cell></cell><cell>0.0052 
0.0062</cell><cell>0.0023 0.0011</cell><cell>0.0146 0.0096</cell></row><row><cell></cell><cell cols="2">Reject Option Classification</cell><cell>0.8833</cell><cell>0.0011</cell><cell></cell><cell>0.0071</cell><cell>0.0098</cell><cell>0.0044</cell></row><row><cell></cell><cell>Reweighing</cell><cell></cell><cell>0.7797</cell><cell>0.0011</cell><cell></cell><cell>0.0343</cell><cell>0.0471</cell><cell>0.1100</cell></row><row><cell>GIN</cell><cell cols="2">Prejudice Remover Regularizer Rich Subgroup Fairness</cell><cell>0.8457 0.8428</cell><cell>0.0023 0.0016</cell><cell></cell><cell>0.0405 0.0082</cell><cell>0.0505 0.0134</cell><cell>0.0095 0.0087</cell></row><row><cell></cell><cell cols="2">Reject Option Classification</cell><cell>0.8968</cell><cell>0.0148</cell><cell></cell><cell>0.0409</cell><cell>0.0348</cell><cell>0.0271</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_5"><head>Table 7</head><label>7</label><figDesc>Results after applying Mitigation Techniques. Upward facing arrow (↑) indicates that higher values are better, whereas downward facing arrows (↓) indicate lower values are better.</figDesc><table /></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">Here the GIT repository with the source code: https://github.com/het28/Bias</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>This research is partially funded by PNRR project FAIR -Future AI Research (PE00000013), Spoke 6 -Symbiotic AI (CUP H97G22000210007) under the NRRP MUR program funded by the NextGenerationEU.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">.it 2024: An Overview on the Future of Explainable AI in the era of Large Language Models</title>
		<author>
			<persName><forename type="first">M</forename><surname>Polignano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Musto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Pellungrini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Purificato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Semeraro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Setzu</surname></persName>
		</author>
		<author>
			<persName><surname>Xai</surname></persName>
		</author>
		<ptr target="CEUR.org" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of 5th Italian Workshop on Explainable Artificial Intelligence, co-located with the 23rd International Conference of the Italian Association for Artificial Intelligence</title>
				<meeting>5th Italian Workshop on Explainable Artificial Intelligence, co-located with the 23rd International Conference of the Italian Association for Artificial Intelligence<address><addrLine>Bolzano, Italy</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2024">November 25-28, 2024. 2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Third workshop on recommender systems for human resources (recsys in hr 2023</title>
		<author>
			<persName><forename type="first">T</forename><surname>Bogers</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Graus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kaya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Johnson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J.-J</forename><surname>Decorte</surname></persName>
		</author>
		<idno type="DOI">10.1145/3604915.3608755</idno>
		<idno>doi:10.1145/3604915.3608755</idno>
		<ptr target="https://doi.org/10.1145/3604915.3608755" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 17th ACM Conference on Recommender Systems, RecSys &apos;23</title>
				<meeting>the 17th ACM Conference on Recommender Systems, RecSys &apos;23<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="1244" to="1247" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">A review of machine learning applications in human resource management</title>
		<author>
			<persName><forename type="first">S</forename><surname>Garg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Sinha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">K</forename><surname>Kar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mani</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">International Journal of Productivity and Performance Management</title>
		<imprint>
			<biblScope unit="volume">71</biblScope>
			<biblScope unit="page" from="1590" to="1610" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Predictive analytics of hr-a machine learning approach</title>
		<author>
			<persName><forename type="first">V</forename><surname>Kakulapati</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">K</forename><surname>Chaitanya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">V G</forename><surname>Chaitanya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Akshay</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Statistics and Management Systems</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="page" from="959" to="969" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">The Use of Responsible Artificial Intelligence Techniques in the Context of Loan Approval Processes</title>
		<author>
			<persName><forename type="first">E</forename><surname>Purificato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Lorenzo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Fallucchi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">W D</forename><surname>Luca</surname></persName>
		</author>
		<idno type="DOI">10.1080/10447318.2022.2081284</idno>
		<idno>doi:10.1080/ 10447318.2022.2081284</idno>
		<ptr target="https://doi.org/10.1080/10447318.2022.2081284" />
	</analytic>
	<monogr>
		<title level="j">International Journal of Human-Computer Interaction</title>
		<editor>Taylor &amp; Francis</editor>
		<imprint>
			<biblScope unit="page" from="1543" to="1562" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">What&apos;s inside the black-box? a genetic programming method for interpreting complex machine learning models</title>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">P</forename><surname>Evans</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Xue</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="DOI">10.1145/3321707.3321726</idno>
		<idno>doi:10.1145/3321707. 3321726</idno>
		<ptr target="https://doi.org/10.1145/3321707.3321726" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Genetic and Evolutionary Computation Conference</title>
				<meeting>the Genetic and Evolutionary Computation Conference<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2019">2019</date>
			<biblScope unit="page" from="1012" to="1020" />
		</imprint>
	</monogr>
	<note>GECCO &apos;19</note>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Interacting with predictions: Visual inspection of black-box machine learning models</title>
		<author>
			<persName><forename type="first">J</forename><surname>Krause</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Perer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Ng</surname></persName>
		</author>
		<idno type="DOI">10.1145/2858036.2858529</idno>
		<idno>doi:10.1145/2858036.2858529</idno>
		<ptr target="https://doi.org/10.1145/2858036.2858529" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, CHI &apos;16</title>
				<meeting>the 2016 CHI Conference on Human Factors in Computing Systems, CHI &apos;16<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2016">2016</date>
			<biblScope unit="page" from="5686" to="5697" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Practical black-box attacks against machine learning</title>
		<author>
			<persName><forename type="first">N</forename><surname>Papernot</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Mcdaniel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Goodfellow</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Jha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><forename type="middle">B</forename><surname>Celik</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Swami</surname></persName>
		</author>
		<idno type="DOI">10.1145/3052973.3053009</idno>
		<idno>doi:10.1145/3052973. 3053009</idno>
		<ptr target="https://doi.org/10.1145/3052973.3053009" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2017 ACM on Asia Conference on Computer and Communications Security, ASIA CCS &apos;17</title>
				<meeting>the 2017 ACM on Asia Conference on Computer and Communications Security, ASIA CCS &apos;17<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="506" to="519" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Discriminated by an algorithm: a systematic review of discrimination and fairness by algorithmic decision-making in the context of hr recruitment and hr development</title>
		<author>
			<persName><forename type="first">A</forename><surname>Köchling</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">C</forename><surname>Wehner</surname></persName>
		</author>
		<idno type="DOI">10.1007/s40685-020-00134-w</idno>
		<ptr target="https://doi.org/10.1007/s40685-020-00134-w.doi:10.1007/s40685-020-00134-w" />
	</analytic>
	<monogr>
		<title level="j">Business Research</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="page" from="795" to="848" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Does the use of synchrony and artificial intelligence in video interviews affect interview ratings and applicant attitudes?</title>
		<author>
			<persName><forename type="first">H.-Y</forename><surname>Suen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">Y.-C</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S.-H</forename><surname>Lu</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.chb.2019.04.012</idno>
		<ptr target="https://doi.org/10.1016/j.chb.2019.04.012" />
	</analytic>
	<monogr>
		<title level="j">Computers in Human Behavior</title>
		<imprint>
			<biblScope unit="volume">98</biblScope>
			<biblScope unit="page" from="93" to="101" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Combining Graph Neural Networks and Sentence Encoders for Knowledge-aware Recommendations</title>
		<author>
			<persName><forename type="first">G</forename><surname>Spillo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Musto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Polignano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Lops</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>De Gemmis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Semeraro</surname></persName>
		</author>
		<idno type="DOI">10.1145/3565472.3592965</idno>
		<idno>doi:10.1145/3565472.3592965</idno>
		<ptr target="https://doi.org/10.1145/3565472.3592965" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 31st ACM Conference on User Modeling, Adaptation and Personalization, UMAP 2023</title>
				<meeting>the 31st ACM Conference on User Modeling, Adaptation and Personalization, UMAP 2023<address><addrLine>Limassol, Cyprus</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2023">June 26-29, 2023. 2023</date>
			<biblScope unit="page" from="1" to="12" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Discriminated by an algorithm: a systematic review of discrimination and fairness by algorithmic decision-making in the context of hr recruitment and hr development</title>
		<author>
			<persName><forename type="first">A</forename><surname>Köchling</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">C</forename><surname>Wehner</surname></persName>
		</author>
		<idno type="DOI">10.1007/s40685-020-00134-w</idno>
		<ptr target="https://doi.org/10.1007/s40685-020-00134-w" />
	</analytic>
	<monogr>
		<title level="j">Business Research</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="page" from="795" to="848" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Do Graph Neural Networks Build Fair User Models? Assessing Disparate Impact and Mistreatment in Behavioural User Profiling</title>
		<author>
			<persName><forename type="first">E</forename><surname>Purificato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Boratto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">W</forename><surname>De Luca</surname></persName>
		</author>
		<idno type="DOI">10.1145/3511808.3557584</idno>
		<idno>doi:10.1145/3511808.3557584</idno>
		<ptr target="https://doi.org/10.1145/3511808.3557584" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 31st ACM International Conference on Information &amp; Knowledge Management, CIKM &apos;22</title>
				<meeting>the 31st ACM International Conference on Information &amp; Knowledge Management, CIKM &apos;22<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2022">2022</date>
			<biblScope unit="page" from="4399" to="4403" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Toward a Responsible Fairness Analysis: From Binary to Multiclass and Multigroup Assessment in Graph Neural Network-Based User Modeling Tasks</title>
		<author>
			<persName><forename type="first">E</forename><surname>Purificato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Boratto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">W</forename><surname>De Luca</surname></persName>
		</author>
		<idno type="DOI">10.1007/s11023-024-09685-x</idno>
		<ptr target="https://doi.org/10.1007/s11023-024-09685-x" />
	</analytic>
	<monogr>
		<title level="j">Minds and Machines</title>
		<imprint>
			<biblScope unit="volume">34</biblScope>
			<biblScope unit="page">33</biblScope>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">What Are We Missing in Algorithmic Fairness? Discussing Open Challenges for Fairness Analysis in User Profiling with Graph Neural Networks</title>
		<author>
			<persName><forename type="first">E</forename><surname>Purificato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">W</forename><surname>De Luca</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-031-37249-0_14</idno>
	</analytic>
	<monogr>
		<title level="m">Advances in Bias and Fairness in Information Retrieval, Communications in Computer and Information Science</title>
				<editor>
			<persName><forename type="first">L</forename><surname>Boratto</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">S</forename><surname>Faralli</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Marras</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">G</forename><surname>Stilo</surname></persName>
		</editor>
		<meeting><address><addrLine>Cham, Switzerland</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="169" to="175" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Gender bias in hiring: An analysis of the impact of amazon&apos;s recruiting algorithm</title>
		<author>
			<persName><forename type="first">X</forename><surname>Chang</surname></persName>
		</author>
		<idno type="DOI">10.54254/2754-1169/23/20230367</idno>
	</analytic>
	<monogr>
		<title level="j">Advances in Economics, Management and Political Sciences</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="page" from="134" to="140" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">The case for process fairness in learning: Feature selection for fair decision making</title>
		<author>
			<persName><forename type="first">N</forename><surname>Grgic-Hlaca</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">B</forename><surname>Zafar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">P</forename><surname>Gummadi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Weller</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">NIPS symposium on machine learning and the law</title>
				<meeting><address><addrLine>Barcelona, Spain</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2016">2016</date>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page">11</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Fairness testing: A comprehensive survey and analysis of trends</title>
		<author>
			<persName><forename type="first">Z</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hort</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Harman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Sarro</surname></persName>
		</author>
		<idno type="DOI">10.1145/3652155</idno>
		<ptr target="https://doi.org/10.1145/3652155" />
	</analytic>
	<monogr>
		<title level="j">ACM Trans. Softw. Eng. Methodol</title>
		<imprint>
			<biblScope unit="volume">33</biblScope>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Judgment under uncertainty: Heuristics and biases</title>
		<author>
			<persName><forename type="first">A</forename><surname>Tversky</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Kahneman</surname></persName>
		</author>
		<idno type="DOI">10.1126/science.185.4157.1124</idno>
		<idno>arXiv:</idno>
		<ptr target="https://www.science.org/doi/pdf/10.1126/science.185.4157.1124" />
	</analytic>
	<monogr>
		<title level="j">Science</title>
		<imprint>
			<biblScope unit="volume">185</biblScope>
			<biblScope unit="page" from="1124" to="1131" />
			<date type="published" when="1974">1974</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Mitigating cognitive biases in developing ai-assisted recruitment systems: A knowledge-sharing approach</title>
		<author>
			<persName><forename type="first">M</forename><surname>Soleimani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Intezari</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Pauleen</surname></persName>
		</author>
		<idno type="DOI">10.4018/IJKM.290022</idno>
	</analytic>
	<monogr>
		<title level="j">International Journal of Knowledge Management</title>
		<imprint>
			<biblScope unit="volume">18</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Video games in job interviews: Using algorithms to minimize discrimination and unconscious bias</title>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">D</forename><surname>Savage</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Bales</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ABAJ Lab. &amp; Emp. L</title>
		<imprint>
			<biblScope unit="volume">32</biblScope>
			<biblScope unit="page">211</biblScope>
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Contextualized BERT Sentence Embeddings for Author Profiling: The Cost of Performances</title>
		<author>
			<persName><forename type="first">M</forename><surname>Polignano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>De Gemmis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Semeraro</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-030-58811-3_10</idno>
		<idno>doi:10.1007/978-3-030-58811-3_10</idno>
		<ptr target="https://doi.org/10.1007/978-3-030-58811-3_10" />
	</analytic>
	<monogr>
		<title level="m">Computational Science and Its Applications -ICCSA 2020 -20th International Conference</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">O</forename><surname>Gervasi</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">B</forename><surname>Murgante</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">S</forename><surname>Misra</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">C</forename><surname>Garau</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">I</forename><surname>Blecic</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">D</forename><surname>Taniar</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">B</forename><forename type="middle">O</forename><surname>Apduhan</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><forename type="middle">M A C</forename><surname>Rocha</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">E</forename><surname>Tarantino</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">C</forename><forename type="middle">M</forename><surname>Torre</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Y</forename><surname>Karaca</surname></persName>
		</editor>
		<meeting><address><addrLine>Cagliari, Italy</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2020">July 1-4, 2020. 2020</date>
			<biblScope unit="volume">12252</biblScope>
			<biblScope unit="page" from="135" to="149" />
		</imprint>
	</monogr>
	<note>Proceedings, Part IV</note>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Is blind recruitment an effective recruitment method?</title>
		<author>
			<persName><forename type="first">R</forename><surname>Vivek</surname></persName>
		</author>
		<idno type="DOI">10.51137/ijarbm.2022.3.3.4</idno>
	</analytic>
	<monogr>
		<title level="j">International Journal of Applied Research in Business and Management</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page" from="56" to="72" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">Fairness constraints: A flexible approach for fair classification</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">B</forename><surname>Zafar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Valera</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Gomez-Rodriguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">P</forename><surname>Gummadi</surname></persName>
		</author>
		<ptr target="http://jmlr.org/papers/v20/18-262.html" />
	</analytic>
	<monogr>
		<title level="j">Journal of Machine Learning Research</title>
		<imprint>
			<biblScope unit="volume">20</biblScope>
			<biblScope unit="page" from="1" to="42" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<monogr>
		<author>
			<persName><surname>Euaiact</surname></persName>
		</author>
		<ptr target="https://www.euaiact.com/annex/3" />
		<title level="m">Annex 3 -euaiact</title>
				<imprint>
			<date type="published" when="2024-09-27">2024. 2024-09-27</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Ethical considerations in ai-based recruitment</title>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">F</forename><surname>Mujtaba</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">R</forename><surname>Mahapatra</surname></persName>
		</author>
		<idno type="DOI">10.1109/ISTAS48451.2019.8937920</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE International Symposium on Technology and Society (ISTAS)</title>
				<imprint>
			<date type="published" when="2019">2019. 2019</date>
			<biblScope unit="page" from="1" to="7" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">Big data&apos;s disparate impact</title>
		<author>
			<persName><forename type="first">S</forename><surname>Barocas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">D</forename><surname>Selbst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Calif. L. Rev</title>
		<imprint>
			<biblScope unit="volume">104</biblScope>
			<biblScope unit="page">671</biblScope>
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<monogr>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">B</forename><surname>Zafar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Valera</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">G</forename><surname>Rodriguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">P</forename><surname>Gummadi</surname></persName>
		</author>
		<idno type="DOI">10.1145/3038912.3052660</idno>
		<idno>doi:10.1145/3038912.3052660</idno>
		<ptr target="https://doi.org/10.1145/3038912.3052660" />
		<title level="m">Fairness beyond disparate treatment &amp; disparate impact: Learning classification without disparate mistreatment</title>
				<imprint>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page">671</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">All Open Access</title>
	</analytic>
	<monogr>
		<title level="m">Green Open Access</title>
				<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">A survey on bias and fairness in machine learning</title>
		<author>
			<persName><forename type="first">N</forename><surname>Mehrabi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Morstatter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Saxena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Lerman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Galstyan</surname></persName>
		</author>
		<idno type="DOI">10.1145/3457607</idno>
		<idno>doi:10.1145/3457607</idno>
		<ptr target="https://doi.org/10.1145/3457607" />
	</analytic>
	<monogr>
		<title level="j">ACM Comput. Surv</title>
		<imprint>
			<biblScope unit="volume">54</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">Fairness through awareness</title>
		<author>
			<persName><forename type="first">C</forename><surname>Dwork</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hardt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Pitassi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Reingold</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Zemel</surname></persName>
		</author>
		<idno type="DOI">10.1145/2090236.2090255</idno>
		<ptr target="https://doi.org/10.1145/2090236.2090255" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 3rd Innovations in Theoretical Computer Science Conference, ITCS &apos;12</title>
				<meeting>the 3rd Innovations in Theoretical Computer Science Conference, ITCS &apos;12<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2012">2012</date>
			<biblScope unit="page" from="214" to="226" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">Counterfactual fairness</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kusner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Loftus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Russell</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Silva</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS&apos;17</title>
				<meeting>the 31st International Conference on Neural Information Processing Systems, NIPS&apos;17<address><addrLine>Red Hook, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Curran Associates Inc</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="4069" to="4079" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<analytic>
		<title level="a" type="main">Fairness definitions explained</title>
		<author>
			<persName><forename type="first">S</forename><surname>Verma</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Rubin</surname></persName>
		</author>
		<idno type="DOI">10.1145/3194770.3194776</idno>
		<idno>doi:10.1145/3194770.3194776</idno>
		<ptr target="https://doi.org/10.1145/3194770.3194776" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the International Workshop on Software Fairness, FairWare &apos;18</title>
				<meeting>the International Workshop on Software Fairness, FairWare &apos;18<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="1" to="7" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<analytic>
		<title level="a" type="main">Avoiding discrimination through causal reasoning</title>
		<author>
			<persName><forename type="first">N</forename><surname>Kilbertus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Rojas-Carulla</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Parascandolo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hardt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Janzing</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Schölkopf</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS&apos;17</title>
				<meeting>the 31st International Conference on Neural Information Processing Systems, NIPS&apos;17<address><addrLine>Red Hook, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Curran Associates Inc</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="656" to="666" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">Equality of opportunity in supervised learning</title>
		<author>
			<persName><forename type="first">M</forename><surname>Hardt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Price</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Srebro</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 30th International Conference on Neural Information Processing Systems, NIPS&apos;16</title>
				<meeting>the 30th International Conference on Neural Information Processing Systems, NIPS&apos;16<address><addrLine>Red Hook, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Curran Associates Inc</publisher>
			<date type="published" when="2016">2016</date>
			<biblScope unit="page" from="3323" to="3331" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<monogr>
		<author>
			<persName><forename type="first">D</forename><surname>Pessach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Shmueli</surname></persName>
		</author>
		<idno>ArXiv abs/2001.09784</idno>
		<ptr target="https://api.semanticscholar.org/CorpusID:210921184" />
		<title level="m">Algorithmic fairness</title>
				<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">How to be fair? a study of label and selection bias</title>
		<author>
			<persName><forename type="first">M</forename><surname>Favier</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Calders</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Pinxteren</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Meyer</surname></persName>
		</author>
		<idno type="DOI">10.1007/s10994-023-06401-1</idno>
		<ptr target="https://doi.org/10.1007/s10994-023-06401-1" />
	</analytic>
	<monogr>
		<title level="j">Machine Learning</title>
		<imprint>
			<biblScope unit="volume">112</biblScope>
			<biblScope unit="page" from="5081" to="5104" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b37">
	<analytic>
		<title level="a" type="main">Fair prediction with disparate impact: A study of bias in recidivism prediction instruments</title>
		<author>
			<persName><forename type="first">A</forename><surname>Chouldechova</surname></persName>
		</author>
		<idno type="DOI">10.1089/big.2016.0047</idno>
	</analytic>
	<monogr>
		<title level="j">Big Data</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="153" to="163" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<analytic>
		<title level="a" type="main">Extending the impacts of hostile media perceptions: Influences on discussion and opinion polarization in the context of climate change</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">S</forename><surname>Hart</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Feldman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Leiserowitz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Maibach</surname></persName>
		</author>
		<idno type="DOI">10.1177/1075547015592067</idno>
		<ptr target="https://doi.org/10.1177/1075547015592067" />
	</analytic>
	<monogr>
		<title level="j">Science Communication</title>
		<imprint>
			<biblScope unit="volume">37</biblScope>
			<biblScope unit="page" from="506" to="532" />
			<date type="published" when="2015">2015</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b39">
	<analytic>
		<title level="a" type="main">Interventions to address potentially inappropriate prescribing in community-dwelling older adults: A systematic review of randomized controlled trials</title>
		<author>
			<persName><forename type="first">B</forename><surname>Clyne</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Fitzgerald</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Quinlan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Hardy</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Galvin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Fahey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">M</forename><surname>Smith</surname></persName>
		</author>
		<idno type="DOI">10.1111/jgs.14133</idno>
		<ptr target="https://doi.org/10.1111/jgs.14133" />
	</analytic>
	<monogr>
		<title level="j">Journal of the American Geriatrics Society</title>
		<imprint>
			<biblScope unit="volume">64</biblScope>
			<biblScope unit="page" from="1210" to="1222" />
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b40">
	<analytic>
		<title level="a" type="main">Data preprocessing techniques for classification without discrimination</title>
		<author>
			<persName><forename type="first">F</forename><surname>Kamiran</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Calders</surname></persName>
		</author>
		<idno type="DOI">10.1007/s10115-011-0463-8</idno>
		<ptr target="https://doi.org/10.1007/s10115-011-0463-8" />
	</analytic>
	<monogr>
		<title level="j">Knowledge and Information Systems</title>
		<imprint>
			<biblScope unit="volume">33</biblScope>
			<biblScope unit="page" from="1" to="33" />
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b41">
	<analytic>
		<title level="a" type="main">Fairness Constraints: Mechanisms for Fair Classification</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">B</forename><surname>Zafar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Valera</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">G</forename><surname>Rogriguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">P</forename><surname>Gummadi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Singh</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><surname>Zhu</surname></persName>
		</editor>
		<meeting>the 20th International Conference on Artificial Intelligence and Statistics<address><addrLine>PMLR</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2017">2017</date>
			<biblScope unit="volume">54</biblScope>
			<biblScope unit="page" from="962" to="970" />
		</imprint>
	</monogr>
	<note>Proceedings of Machine Learning Research</note>
</biblStruct>

<biblStruct xml:id="b42">
	<analytic>
		<title level="a" type="main">Mitigating unwanted biases with adversarial learning</title>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">H</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Lemoine</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mitchell</surname></persName>
		</author>
		<idno type="DOI">10.1145/3278721.3278779</idno>
		<idno>doi:10.1145/3278721.3278779</idno>
		<ptr target="https://doi.org/10.1145/3278721.3278779" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES &apos;18</title>
				<meeting>the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES &apos;18<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="335" to="340" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b43">
	<analytic>
		<title level="a" type="main">Algorithmic decision making and the cost of fairness</title>
		<author>
			<persName><forename type="first">S</forename><surname>Corbett-Davies</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Pierson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Feller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Goel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Huq</surname></persName>
		</author>
		<idno type="DOI">10.1145/3097983.3098095</idno>
		<idno>doi:10.1145/3097983.3098095</idno>
		<ptr target="https://doi.org/10.1145/3097983.3098095" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD &apos;17</title>
				<meeting>the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD &apos;17<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="797" to="806" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b44">
	<monogr>
		<title level="m" type="main">Adult, UCI Machine Learning Repository</title>
		<author>
			<persName><forename type="first">B</forename><surname>Becker</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Kohavi</surname></persName>
		</author>
		<idno type="DOI">10.24432/C5XW20</idno>
		<idno>doi:10.24432/C5XW20</idno>
		<ptr target="https://doi.org/10.24432/C5XW20" />
		<imprint>
			<date type="published" when="1996">1996</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b45">
	<analytic>
		<title level="a" type="main">Being prepared in a sparse world: The case of knn graph construction</title>
		<author>
			<persName><forename type="first">A</forename><surname>Boutet</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A.-M</forename><surname>Kermarrec</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Mittal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Taiani</surname></persName>
		</author>
		<idno type="DOI">10.1109/ICDE.2016.7498244</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE 32nd International Conference on Data Engineering (ICDE)</title>
				<imprint>
			<date type="published" when="2016">2016. 2016</date>
			<biblScope unit="page" from="241" to="252" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b46">
	<monogr>
		<title level="m" type="main">Enhancing fairness and performance in machine learning models: A multi-task learning approach with monte-carlo dropout and pareto optimality</title>
		<author>
			<persName><forename type="first">K</forename><surname>Zanna</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Sano</surname></persName>
		</author>
		<idno>ArXiv abs/2404.08230</idno>
		<ptr target="https://api.semanticscholar.org/CorpusID:269137478" />
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b47">
	<analytic>
		<title level="a" type="main">Bias in algorithms: On the trade-off between accuracy and fairness</title>
		<author>
			<persName><forename type="first">P</forename><surname>Janssen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">M</forename><surname>Sadowski</surname></persName>
		</author>
		<ptr target="https://hdl.handle.net/10419/238032" />
	</analytic>
	<monogr>
		<title level="m">23rd Biennial Conference of the International Telecommunications Society (ITS): &quot;Digital societies and industrial transformations: Policies, markets, and technologies in a post-Covid world</title>
				<meeting><address><addrLine>Gothenburg, Sweden; ITS); Calgary</addrLine></address></meeting>
		<imprint>
			<publisher>International Telecommunications Society</publisher>
			<date type="published" when="2021-06-23">21st-23rd June, 2021</date>
		</imprint>
	</monogr>
	<note>Online Conference /</note>
</biblStruct>

<biblStruct xml:id="b48">
	<analytic>
		<title level="a" type="main">Fairness-aware classifier with prejudice remover regularizer</title>
		<author>
			<persName><forename type="first">T</forename><surname>Kamishima</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Akaho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Asoh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sakuma</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Machine Learning and Knowledge Discovery in Databases</title>
				<editor>
			<persName><forename type="first">P</forename><forename type="middle">A</forename><surname>Flach</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><surname>De Bie</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">N</forename><surname>Cristianini</surname></persName>
		</editor>
		<meeting><address><addrLine>Berlin Heidelberg; Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2012">2012</date>
			<biblScope unit="page" from="35" to="50" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b49">
	<analytic>
		<title level="a" type="main">Conscientious classification: A data scientist&apos;s guide to discrimination-aware classification</title>
		<author>
			<persName><forename type="first">B</forename><surname>D&apos;alessandro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>O'neil</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Lagatta</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Big Data</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="page">120</biblScope>
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b50">
	<monogr>
		<title/>
		<idno type="DOI">10.1089/big.2016.0048</idno>
		<idno>arXiv:</idno>
		<ptr target="https://pubmed.ncbi.nlm.nih.gov/28632437" />
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b51">
	<analytic>
		<title level="a" type="main">Preventing fairness gerrymandering: Auditing and learning for subgroup fairness</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kearns</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Neel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Roth</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><forename type="middle">S</forename><surname>Wu</surname></persName>
		</author>
		<ptr target="https://proceedings.mlr.press/v80/kearns18a.html" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 35th International Conference on Machine Learning</title>
				<editor>
			<persName><forename type="first">J</forename><surname>Dy</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Krause</surname></persName>
		</editor>
		<meeting>the 35th International Conference on Machine Learning<address><addrLine>PMLR</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="volume">80</biblScope>
			<biblScope unit="page" from="2564" to="2572" />
		</imprint>
	</monogr>
	<note>Proceedings of Machine Learning Research</note>
</biblStruct>

<biblStruct xml:id="b52">
	<analytic>
		<title level="a" type="main">Decision theory for discrimination-aware classification</title>
		<author>
			<persName><forename type="first">F</forename><surname>Kamiran</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Karim</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="DOI">10.1109/ICDM.2012.45</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE 12th International Conference on Data Mining</title>
				<imprint>
			<date type="published" when="2012">2012</date>
			<biblScope unit="page" from="924" to="929" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b53">
	<analytic>
		<title level="a" type="main">Graph convolutional network based on higher-order neighborhood aggregation</title>
		<author>
			<persName><forename type="first">G.-F</forename><surname>Ma</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X.-H</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Ye</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y.-J</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Jiang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Neural Information Processing</title>
				<editor>
			<persName><forename type="first">T</forename><surname>Mantoro</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Lee</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><forename type="middle">A</forename><surname>Ayu</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">K</forename><forename type="middle">W</forename><surname>Wong</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><forename type="middle">N</forename><surname>Hidayanto</surname></persName>
		</editor>
		<meeting><address><addrLine>Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer International Publishing</publisher>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="334" to="342" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b54">
	<analytic>
		<title level="a" type="main">Improved graph representation learning based on neighborhood aggregation and interaction fusion</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Ning</surname></persName>
		</author>
		<idno type="DOI">10.3233/JIFS-234086</idno>
		<ptr target="https://doi.org/10.3233/JIFS-234086" />
	</analytic>
	<monogr>
		<title level="j">Journal of Intelligent &amp; Fuzzy Systems</title>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
