<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Mitigation of Popularity Bias in Recommendation Systems</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Sabrina</forename><surname>Karboua</surname></persName>
							<email>sabrina.karboua@univ-setif.dz</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Department of Computer Science</orgName>
								<orgName type="department" key="dep2">College of Sciences</orgName>
								<orgName type="institution">Ferhat ABBAS Setif</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University</orgName>
								<address>
									<postCode>19000</postCode>
									<settlement>Setif</settlement>
									<country key="DZ">Algeria</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department">Optics and Precision Mechanics Institute</orgName>
								<orgName type="laboratory">Mechatronics Laboratory</orgName>
								<orgName type="institution">Ferhat ABBAS University Setif</orgName>
								<address>
									<postCode>19000</postCode>
									<settlement>Setif</settlement>
									<country key="DZ">Algeria</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Fouzi</forename><surname>Harrag</surname></persName>
							<email>fouzi.harrag@univ-setif.dz</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Department of Computer Science</orgName>
								<orgName type="department" key="dep2">College of Sciences</orgName>
								<orgName type="institution">Ferhat ABBAS Setif</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University</orgName>
								<address>
									<postCode>19000</postCode>
									<settlement>Setif</settlement>
									<country key="DZ">Algeria</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department">Optics and Precision Mechanics Institute</orgName>
								<orgName type="laboratory">Mechatronics Laboratory</orgName>
								<orgName type="institution">Ferhat ABBAS University Setif</orgName>
								<address>
									<postCode>19000</postCode>
									<settlement>Setif</settlement>
									<country key="DZ">Algeria</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Farid</forename><surname>Meziane</surname></persName>
							<email>f.meziane@derby.ac.uk</email>
							<affiliation key="aff2">
								<orgName type="department">Data Science Research Centre</orgName>
								<orgName type="institution">University of Derby</orgName>
								<address>
									<addrLine>Markeaton Street</addrLine>
									<postCode>DE22 3AW</postCode>
									<settlement>Derby</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Amal</forename><surname>Boutadjine</surname></persName>
							<email>boutadjine.amal@univ-setif.dz</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Department of Computer Science</orgName>
								<orgName type="department" key="dep2">College of Sciences</orgName>
								<orgName type="institution">Ferhat ABBAS Setif</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University</orgName>
								<address>
									<postCode>19000</postCode>
									<settlement>Setif</settlement>
									<country key="DZ">Algeria</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department">Optics and Precision Mechanics Institute</orgName>
								<orgName type="laboratory">Mechatronics Laboratory</orgName>
								<orgName type="institution">Ferhat ABBAS University Setif</orgName>
								<address>
									<postCode>19000</postCode>
									<settlement>Setif</settlement>
									<country key="DZ">Algeria</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Mitigation of Popularity Bias in Recommendation Systems</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">5969D8D51745A891C94DFE75E9DD593A</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-25T07:11+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Popularity bias</term>
					<term>Recommender System</term>
					<term>Fairness</term>
					<term>Mitigation</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>In response to the quantity of information available on the Internet, many online service providers are attempting to customize their services and make content access more simple via recommender systems (RSs) to support users in discovering the products they are most likely interested in. However, these recommendation systems are prone to popularity bias, which is a tendency to promote popular items even if they do not satisfy a user's preferences and then provide customers with recommendations of poor quality. Such a bias has a negative influence on both users and item providers. It is then essential to mitigate such bias in order to guarantee that less popular but pertinent items show up on the user's recommendation list. In this work, we conduct an empirical analysis of different mitigation techniques for popularity bias to provide an overview of the present state of the art of popularity bias and raise the fairness issue in RSs.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Recommendation systems (RSs) have been enormously effective in solving the issue of information overload, in which users struggle to discover information they are interested in. We encounter RSs, for example, on Netflix to recommend movies, on Spotify for music recommendations, on online educational websites to recommend courses like Coursera, when we get purchase recommendations on e-commerce websites like Amazon, and when we are advised of new connections on social networking platforms like Facebook. A recommender system's performance is often assessed in terms of multiple criteria like accuracy, diversity, novelty, and fairness. Fairness in RSs is a notion that has lately gained a lot of interest because developing biased recommendation systems inhibits customers from discovering items that are not extremely popular yet are good fits for them. Popularity bias is one factor that contributes to unfairness in RSs as it prevents diverse items from having an equal opportunity of recommendation and exposure. This is a phenomenon in which the most popular items get more and more exposure, while less popular items receive less exposure <ref type="bibr" target="#b0">[1]</ref>. Popularity bias occurs because popular items often have significantly more rating data than less popular ones. This is because the recommender systems are trained on user preferences, and normally, many users rate the popular items while the less popular items earn just a few ratings, which implies that the unpopular items that constitute the so-called long tail of recommendations, do not gain enough exposure, particularly when they are new to the system <ref type="bibr" target="#b1">[2]</ref>. Training RSs models on biased data promotes the recommendation of popular items more often than less popular ones. 
The more rating data the items have, the more they are suggested, and the more the items get recommended, the more ratings they earn from users, and then "the rich get richer, and the poor get poorer" <ref type="bibr" target="#b2">[3]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Definition of Popularity Bias</head><p>Various researchers have studied the idea of popularity bias, which is also known by other names like Matthew effect <ref type="bibr" target="#b3">[4]</ref>, long-tail recommendation <ref type="bibr" target="#b4">[5,</ref><ref type="bibr" target="#b5">6]</ref> and aggregate diversity <ref type="bibr" target="#b6">[7]</ref>. All of these words refer to the fact that just a few popular items are regularly recommended, whereas most unpopular items are seldom recommended. The popularity bias of item 𝑖 is calculated mathematically by dividing the number of users who gave item 𝑖 a rating by the total number of users <ref type="bibr" target="#b0">[1]</ref>.</p><formula xml:id="formula_0">𝑃 𝑂𝑃 (𝑖) = ∑︀ 𝑢∈𝑈 1(𝑖 ∈ 𝑝 𝑢 ) |𝑈 | ,</formula><p>where 𝑈 represents the list of users and 𝑝 𝑢 represents the list of items rated by user 𝑢.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Empirical Examples of Popularity Bias and Its Impacts in RSs</head><p>Popularity bias has a negative influence not only on the consumers of a recommender system, but also on the providers, and the system itself. Bias toward popular items may influence the consumption of less popular ones, hinder them from becoming popular in the future, and thus harm the recommendation fairness. The effects of this bias can be better understood through real-world examples. According to Abdollahpouri et al. <ref type="bibr" target="#b2">[3]</ref>, popularity bias homogenizes the market by allowing only a few item producers to dominate it, resulting in fewer chances for innovation and originality. Boratto et al. <ref type="bibr" target="#b7">[8]</ref> state that in the educational field, an online educational platform recommendation may exhibit undesired bias and subsequent educational amplifications. They demonstrate that a few popular courses evaluated by a large number of learners might cause the platform to suffer from popularity bias, preventing new courses from appearing at the top of the suggested list. As a result, the platform may be controlled by a few well-known courses and consequently few teachers.</p><p>In the music and movie domains, the analysis by <ref type="bibr" target="#b2">[3,</ref><ref type="bibr" target="#b8">9,</ref><ref type="bibr" target="#b9">10]</ref> reveals that most items in the long-tail category do not receive enough exposure and attention, whereas few popular items are recommended regularly. They also highlight that the majority of consumers, who have little interest in popular songs/movies, would be disadvantaged by the recommendation algorithm since it tends to recommend popular items, and this leads to an under-recommendation of unpopular items. Consequently, users that are interested in unpopular songs/movies will get lower-quality recommendations than those interested in popular music/movies. 
The findings of <ref type="bibr" target="#b10">[11,</ref><ref type="bibr" target="#b11">12]</ref> in the book domain show that the most frequently used algorithms are unable to pique users' interest in niche books and instead promote largely popular books, and users with niche preferences get much poorer quality recommendations than users with mainstream tastes.</p><p>The research <ref type="bibr" target="#b12">[13]</ref> investigates the presence of popularity bias in online dating websites and its implications for users' chances of meeting dating partners. They find that even if the recommendation algorithm recommends popular users more often, popular users are likely to accept invitations and messages from other, non-popular users, leading to inferior matching results. Gharahighehi et al. <ref type="bibr" target="#b13">[14]</ref> state that it is quite likely that RSs promote the popularity bias by recommending only very popular articles, ignoring articles by less popular authors even if their writings are important to a certain set of readers. This unfair recommendation might have detrimental effects for these journalists, as they may eventually lose faith in the platform, affecting other parties as well as the whole system.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Mitigation Techniques</head><p>Most popularity bias mitigation algorithms are categorized into two types: in-processing algorithms and post-processing algorithms.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1.">In-processing Techniques</head><p>These approaches include altering the recommender system algorithm during training or introducing new RSs algorithms to incorporate fairness into the model.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>1-Causal Graph.</head><p>A causal graph is a directed acyclic graph, in which each node represents a variable and each edge represents a causal relationship between two nodes. Zhang et al. <ref type="bibr" target="#b14">[15]</ref> demonstrate that between the exposed items and the observed interactions, item popularity functions as a confounder, resulting in the negative effect of bias amplification. They use the causal graph to analyze how item popularity influences the recommendation process.</p><p>The causal graph of traditional recommender methods only considers that the interaction probability (𝐶) of an exposed item (𝐼) that is consumed by user 𝑈 is determined by the match between user interest and item property (𝑈, 𝐼 → 𝐶). Zhang et al. <ref type="bibr" target="#b14">[15]</ref> enrich the graph by adding the item popularity represented as node Z, which considers that the interaction probability is based on the user, the exposed item, and the popularity of the item (𝑈, 𝐼, 𝑍 → 𝐶).  To overcome popularity bias, Wei et al. <ref type="bibr" target="#b15">[16]</ref> disentangle the effects of item popularity and user interest from causal inference. They first create a causal graph to represent the key cause-effect relationships in the recommendation model, and assume that the probability of an interaction is influenced by the user-item matching, user conformity and item popularity. Then, they propose a model-agnostic counterfactual reasoning (MACR) framework for training the causal graph-based recommender model and perform counterfactual inference in the recommendation inference step to reduce popularity bias. Another study similar to <ref type="bibr" target="#b15">[16]</ref> is produced by Zheng et al. <ref type="bibr" target="#b16">[17]</ref>. They make the assumption that each click is the result of the union of two separate causes: item's popularity and user's interest. 
Then, they employ different embeddings for the user's interest and item's popularity, in order to have two embeddings for each user or item. After that, they train the model under the framework of multi-task learning using cause-specific data to constrain each embedding to represent only one cause and then achieve disentanglement between popularity embedding and interest embedding. Furthermore, direct disentanglement supervision is used to achieve greater independence across embeddings of various causes. Wang et al. <ref type="bibr" target="#b17">[18]</ref> present a novel framework DecRS that explicitly incorporates causal effect of user representation on prediction score and used an approximation operator for backdoor adjustment to eliminate the misleading correlation induced by the confounder, and then present a user-specific inference technique for dynamically regulating the impact of backdoor adjustment based on user state. He et al. <ref type="bibr" target="#b18">[19]</ref> present a novel framework named Mitigating Popularity Prejudice in Recommendation through Counterfactual Inference (MPCI) to eliminate popularity bias from both the data and model perspectives. MPCI used click data to capture user preference representations and then extracted biased data to learn popularity bias representations to estimate how popularity bias affects the prediction score in a causal graph, which may then be used to mitigate its detrimental effect by counterfactual inference. A collection of both positive and negative samples are included in each representation in order to measure the popularity bias prediction score and the user preference prediction score using a fusion function. The two scores and the expectation constant are then combined, and used as the counterfactual inference's input to finally predict the user's prediction score for each item.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Popularity Bias Mitigation techniques</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Post-processing In-processing</head><note type="other">Regularization</note><p>Zhao et al. <ref type="bibr" target="#b18">[19]</ref> state that not all popularity bias is harmful, because some items are more popular because they have intrinsically better properties, and removing the popularity bias completely would deteriorate the model performance. They argue that popularity bias is affected by two factors: static item quality and the dynamic conformity effect (which refers to a user who follows community standards while diverging from his personal preferences). Therefore, it is critical to distinguish between the desirable popularity bias induced by item quality and conformity that leads to bad popularity bias. To achieve this goal, they propose a novel Time-aware DisEntangled framework (TIDE) using a causal graph to model that a click is generated by three components: conformity effect (𝐶), item's quality (𝑄) and item-user matching score (𝑀 ) returned by the recommendation model. Then, they apply a causal intervention during the inference stage on the conformity module to help the prediction avoid the undesirable conformity impact and benefit from the item quality and interest matching.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>2-Variational Auto Encoder.</head><p>VAEs are Multi Layer Perceptrons (MLP)-based generative models that have recently been presented as a strong method for building Collaborative Filtering (CF) recommenders and minimizing bias.</p><p>To reduce popularity bias and boost diversity in results, Borges et al. <ref type="bibr" target="#b19">[20]</ref> propose to adapt the VAE by adding a penalty constraint to the reconstruction error (decoder) to boost the visibility of unpopular items as follows:</p><formula xml:id="formula_1">𝑙𝑜𝑔 𝑝(𝑥 𝑢 |𝑧 𝑢 ) = 𝑁 ∑︁ 𝑖=0 𝑥 𝑢𝑖 𝑙𝑜𝑔𝜋 𝑖 (𝑧 𝑢 ).𝜆,</formula><p>The penalty term 𝜆 has the effect of lowering the score of the most popular of all items by multiplying it by 0, while keeping the niche items at their initial score multiplied by 1, and it is defined as follows:</p><formula xml:id="formula_2">𝜆 = 1 − 𝑤(𝑥 𝑖 ) − min(𝑤(𝑥)) max(𝑤(𝑥)) − min(𝑤(𝑥))</formula><p>,</p><p>where 𝑤(𝑥) returns the number of interactions for item 𝑥.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>3-Regularization.</head><p>It has been used in matrix factorisation algorithms to control overfitting by limiting the size of the latent components. Kiswanto et al. <ref type="bibr" target="#b20">[21]</ref> propose a fairness-aware regularization learning-to-rank method to make the weight of unpopular items higher without affecting the recommender system ranking performance by recommending a balanced mix of popular and unpopular items to users.</p><p>Abdollahpouri et al. <ref type="bibr" target="#b21">[22]</ref> investigate how regularization may be used to mitigate the recommender system's popularity bias. They start with an optimization goal of the form:</p><formula xml:id="formula_3">min 𝑃,𝑄</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>𝑎𝑐𝑐(𝑃, 𝑄) + 𝜆 𝑟𝑒𝑔(𝑃, 𝑄),</head><p>where 𝑎𝑐𝑐 represents the accuracy target, 𝑟𝑒𝑔 is the regularization term, in which the 𝐿𝑎𝑝𝐷𝑄 regularizer was used, and 𝜆 is a coefficient for managing the regularizer effect. The 𝐿𝑎𝑝𝐷𝑄 regularizer has the form 𝑡𝑟(𝑄 𝑇 𝐿 𝐷 𝑄), where 𝑡𝑟 is the matrix trace, 𝐿 𝐷 is the Laplacian matrix of D. 𝐷 𝑖,𝑗 = 1 if item 𝑖 and 𝑗 are from the same set (popular or unpopular items), and 0 if items 𝑖 and 𝑗 are from different sets, and 𝑄 is the item latent representation matrix. Zhu et al. <ref type="bibr" target="#b22">[23]</ref> calculate the correlation between the predicted preference score for a positive user-item and the popularity of matched items using the square of the Pearson regularizer and then, reduce the bias by reducing this regularization term coupled with the recommendation error using the following form:</p><formula xml:id="formula_4">min 𝑃,𝑄 𝐿 𝑅𝑒𝑐 + 𝜆 𝑃 𝑃 𝐶(𝑅 ˆ+, 𝑝𝑜𝑝(𝐼)) 2 ,</formula><p>where 𝐿 𝑅𝑒𝑐 represents the loss of recommendation models, 𝜆 is the trade-off weight, and 𝑃 𝑃 𝐶(.) is the Pearson correlation coefficient between expected scores and item popularity. 𝑅 ˆ is the predicted user-item preference matrix, and 𝑝𝑜𝑝(.) is the item popularity. <ref type="bibr">Krishnan et al.[24]</ref> propose an adversarial training technique to mitigate the bias. The adversary network 𝐷 learns in the feedback data the implicit relationship structure of items, and correlates niche item recommendation of the base recommender 𝐺 with popular items in the user's history, while the base recommender model 𝐺 is concurrently trained with the adversary network to replicate these associations while avoiding the adversarial penalty until mutual convergence. More specifically, the adversary D is taught to differentiate synthetic pairs of popular and unpopular items selected from 𝐺 and real pairings of popular and unpopular items obtained from the global co-occurrence of matrix 𝑋. 
When the synthetic and real niche-popular pairings match with the association structure acquired by D, the model converges.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>4-Adversarial Training.</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>5-Knowledge Graph.</head><p>Based on the essential assumption that a user often has several preferences prompting him to consume diverse items, Wei et al. <ref type="bibr" target="#b24">[25]</ref> suggest a framework to mitigate popularity bias from the users' perspective using knowledge graph, which profiles user-item connections throughout the knowledge graph using fine-grained preferences, and then eliminate a proportion of popularity preference for distinct users. The framework seeks to apply Knowledge Graph integrated with popularity nodes to decrease popularity bias by determining the fine-grained preferences of users. It initially builds a heterogeneous network by combining Knowledge Graph, preference graph, and popularity nodes. The embeddings of item, user, preference and the mutual attention parameters were then learned by applying a heterogeneous graph transformer to the heterogeneous graph while matching fine-grained preferences with the relations in Knowledge Graph. Finally, popularity preference is removed adaptively based on the user's interest in popular items to reduce the bias.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>6-Adaptive Boosting.</head><p>Inspired by the fair boosting technique on classification, Gangwar et al. <ref type="bibr" target="#b25">[26]</ref> provide an algorithm named "FairBoost" that minimizes the popularity bias existing in the data while preserving accuracy within reasonable bounds, by increasing the weights of the unpopular items, which are typically under-represented in the data, and then maintaining a balance between popular and unpopular items.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.">Post-Processing Techniques</head><p>This kind of technique may be applied to any recommender system's output by reordering the original recommended list based on some constraints.</p><p>1-Knapsack Optimization model. Seymen et al. <ref type="bibr" target="#b26">[27]</ref> propose a re-ranking optimization model to find the best system solution given diverse constraints to deal with various different problems like popularity bias, diversity and provider fairness. They start by defining a baseline model to increase the overall recommended items' average predicted ratings, with the restriction that each user gets K recommendations.</p><formula xml:id="formula_5">1 𝐾|𝑈 | ∑︁ 𝑖∈𝐼 ∑︁ 𝑢∈𝑈 𝑟 𝑖𝑢 𝑥 𝑖𝑢 ,</formula><p>where I is the set of items and U is the set of users, K is the number of items to be recommended to each user, 𝑟 𝑖𝑢 represents the predicted rating for user u ∈ U and item i ∈ I . x iu is a binary value (an item is either recommended or not) that specifies items that are recommended, where x iu = 1 if item i is recommended to user u and 0 otherwise.</p><p>Then, they extend the base model by adding a knapsack constraint (Pop-Opt) to leverage the popularity of all recommended items as follows.</p><formula xml:id="formula_6">∑︁ 𝑖∈𝐼 ∑︁ 𝑢∈𝑈 𝑥 𝑖𝑢 𝜔 𝑖 ≤ 𝛼,</formula><p>where 𝛼 represents the maximum bound on the overall popularity of the recommended items and 𝜔 𝑖 calculates the item's popularity i by dividing the number of ratings received by item i by the sum of all the ratings for all other items in the system.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>2-Personalized diversification approach.</head><p>To decrease the effect of popularity bias, a new post-processing method is proposed by Abdollahpouri et al. <ref type="bibr" target="#b27">[28]</ref> by reordering the initial recommendation list provided by the recommender algorithm. They improve a diversification technique whose aim is to achieve the required compromise between accuracy and long-tail coverage (improve the representation of underrepresented items). Their strategy is based on an algorithm named the eXplicit Query Aspect Diversification (xQuAD), which is developed to diversify query results so it addresses a wide range of query components. They apply the following criterion on the final recommendation list to maintain a balance between the ratio of popular items and niche items as follows:</p><p>𝑃 (𝑣|𝑢) + 𝜆 𝑃 (𝑣, 𝑆 ′ |𝑢), 𝑃 (𝑣|𝑢) indicates the probability that a user 𝑢 ∈ 𝑈 (list of users) is interested in item 𝑣 ∈ 𝑉 (list of items), and 𝑃 (𝑣, 𝑆 ′ |𝑢) reflects the probability that user 𝑢 will be interested in item 𝑣 and that 𝑣 is not already in S (new re-ordered list). The first term emphasizes scoring accuracy, whereas the second term encourages diversity between popular and unpopular items. In general, the 𝜆 parameter affects how highly the regulating of popularity bias is weighted. The item with the highest score is included in the final list 𝑆, and the procedure is then repeated until 𝑆 achieves the required length. In order to produce more diversified recommendation that includes both popular Γ and unpopular Γ' items, the marginal probability 𝑃 (𝑣, 𝑆 ′ |𝑢) is computed as:</p><formula xml:id="formula_7">𝑃 (𝑣, 𝑆 ′ |𝑢) = ∑︁ 𝑑∈Γ,Γ ′ 𝑃 (𝑑|𝑢)𝑃 (𝑣|𝑑) ∏︁ 𝑖∈𝑆 (1 − 𝑃 (𝑖|𝑑)),</formula><p>where 𝑃 (𝑑|𝑢) is an indicator of user preference across distinct item groups, 𝑃 (𝑣|𝑑) is equal to 1 if item 𝑣 in the original recommended list 𝑆 includes category 𝑑 and 0 otherwise.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>3-Calibrated popularity.</head><p>The distributional discrepancy in the groups (short-head, medium, tail) to which items belong, between the user's profile and its recommended list, is measured. The principle of calibrated popularity (CP) is that, for example, if a user likes 10% popular items, 30% items of medium popularity, and 40% unpopular items, the recommended list should comprise 10% popular items, 30% items of medium popularity, and 40% niche items <ref type="bibr" target="#b28">[29]</ref>. The Calibrated Popularity method generates a final recommendation list 𝐿 𝑢 for each user 𝑢 from an initial recommended list 𝑆 created by a base recommender. A weighted sum of relevance and calibration is used and maximized as follows</p><formula xml:id="formula_8">𝐹 𝑢 = 𝑎𝑟𝑔 𝑚𝑎𝑥(1 − 𝜆).𝑅𝑒𝑙(𝐿 𝑢 ) − 𝜆.ℑ(𝑃, 𝑄(𝐿 𝑢 )),</formula><p>where 𝜆 is the weight regulating popularity calibration vs the relevance, 𝑅𝑒𝑙(𝐿 𝑢 ) is the total of the predicted scores for items in 𝐿 𝑢 . ℑ represents the Jensen-Shannon divergence between the user's profile and the recommendation list items.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>4-Popularity Compensation.</head><p>The research <ref type="bibr" target="#b22">[23]</ref> introduces another re-ranking strategy that alters the predicted user-item preference matrix by compensating low-popularity items so that they have greater preference scores and therefore, they are ranked higher. This compensation is based on three key principles:</p><p>-Compensation should be dependent on item popularity, with less popular items receiving higher compensation.</p><p>-Compensation should also be dependent on user preferences; items that have a higher possibility of being preferred by a user should be rewarded more. The algorithm therefore assures that items that are not liked by a user and have low popularity will not be recommended by mistake.</p><p>-Compensation should be based on each user's value scale: item candidates for a user with a higher value scale should be compensated more. This ensures that consumers with high value scales of estimated scores receive adequate compensation for items, ensuring that the algorithm is useful to all users.</p><p>For one item 𝑖 given user 𝑢, the popularity compensation score is calculated using the following equation:</p><formula xml:id="formula_9">𝐶 𝑢,𝑖 = 1 𝑝𝑜𝑝(𝑖)</formula><p>.(𝑅 ˆ𝑢,𝑖 .𝛽 + 1 − 𝛽),</p><p>where 𝑅 ˆ𝑢,𝑖 is the predicted preference scores from user 𝑢 to items produced by the algorithm. 1/𝑝𝑜𝑝(𝑖) is used to achieve the first condition. 𝑅 ˆ𝑢,𝑖 .𝛽 + 1 − 𝛽 is used to achieve the second condition. 
𝛽 ∈ [0, 1] is a trade-off weight used to adjust the predicted preference score ratio in the compensation.</p><p>Condition 3 is calculated using the following equation:</p><p>𝑅 * ˆ𝑢,𝑖 = 𝑅 ˆ𝑢,𝑖 .𝛼 + 𝐶 𝑢,𝑖 .𝑛 𝑢 /𝑚 𝑢 ,</p><p>where 𝑅 ˆ* 𝑢,𝑖 is the new preference score from 𝑢 to 𝑖, 𝛼 represents the algorithm's trade-off weight, and 𝑛 𝑢 /𝑚 𝑢 used to normalize the compensation scores (𝑛 𝑢 represents predicted scores norm for user 𝑢 and 𝑚 𝑢 is the compensation scores norm of 𝑢 excepting those for items in the interacted item set in the training data).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Conclusion</head><p>In this paper, we explained the issue of popularity bias in Recommender Systems (RSs), then we systematically provided a classification of mitigation techniques into two approaches: in-processing (model based) and post-processing (re-ranking). The first approach is based on improving current algorithms by adding a constraint into the objective function or proposing new algorithms. In the second approach, the original recommended list is re-ordered based on some constraints to mitigate the popularity bias.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Classification of popularity bias mitigation techniques</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: proposed causal graph<ref type="bibr" target="#b14">[15]</ref> </figDesc><graphic coords="4,239.74,545.89,115.80,62.70" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: proposed causal graph<ref type="bibr" target="#b15">[16]</ref> </figDesc><graphic coords="5,247.69,190.97,99.90,55.50" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: proposed causal graph by<ref type="bibr" target="#b16">[17]</ref> </figDesc><graphic coords="5,250.84,421.45,93.60,73.68" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: proposed causal graph by<ref type="bibr" target="#b18">[19]</ref> </figDesc><graphic coords="6,254.74,301.03,85.80,71.40" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0"><head></head><label></label><figDesc></figDesc><graphic coords="1,0.00,190.95,595.28,459.99" type="bitmap" /></figure>
		</body>
		<back>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<monogr>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<title level="m">Popularity bias in recommendation: a multi-stakeholder perspective</title>
				<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
		<respStmt>
			<orgName>University of Colorado at Boulder</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">Ph.D. thesis</note>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Investigating and counteracting popularity bias in group recommendations</title>
		<author>
			<persName><forename type="first">E</forename><surname>Yalcin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Bilge</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Information Processing &amp; Management</title>
		<imprint>
			<biblScope unit="volume">58</biblScope>
			<biblScope unit="page">102608</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mansoury</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Burke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Mobasher</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1907.13286</idno>
		<title level="m">The unfairness of popularity bias in recommendation</title>
				<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Do not blame it on the algorithm: an empirical assessment of multiple recommender systems and their impact on content diversity, Information</title>
		<author>
			<persName><forename type="first">J</forename><surname>Möller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Trilling</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Helberger</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Van Es</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Communication &amp; Society</title>
		<imprint>
			<biblScope unit="volume">21</biblScope>
			<biblScope unit="page" from="959" to="977" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Controlling popularity bias in learning-to-rank recommendation</title>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Burke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Mobasher</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the eleventh ACM conference on recommender systems</title>
				<meeting>the eleventh ACM conference on recommender systems</meeting>
		<imprint>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="42" to="46" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<monogr>
		<author>
			<persName><forename type="first">H</forename><surname>Yin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Cui</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Yao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Chen</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1205.6700</idno>
		<title level="m">Challenging the long tail recommendation</title>
				<imprint>
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Improving aggregate recommendation diversity using rankingbased techniques</title>
		<author>
			<persName><forename type="first">G</forename><surname>Adomavicius</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Kwon</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Knowledge and Data Engineering</title>
		<imprint>
			<biblScope unit="volume">24</biblScope>
			<biblScope unit="page" from="896" to="911" />
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">The effect of algorithmic bias on recommender systems for massive open online courses</title>
		<author>
			<persName><forename type="first">L</forename><surname>Boratto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Fenu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Marras</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">European Conference on Information Retrieval</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2019">2019</date>
			<biblScope unit="page" from="457" to="472" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">The unfairness of popularity bias in music recommendation: A reproducibility study</title>
		<author>
			<persName><forename type="first">D</forename><surname>Kowald</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Schedl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Lex</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">European conference on information retrieval</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2020">2020</date>
			<biblScope unit="page" from="35" to="42" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Global and country-specific mainstreaminess measures: Definitions, analysis, and usage for improving personalized music recommendation systems</title>
		<author>
			<persName><forename type="first">C</forename><surname>Bauer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Schedl</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">PloS one</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="page">e0217389</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<monogr>
		<author>
			<persName><forename type="first">M</forename><surname>Naghiaei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">A</forename><surname>Rahmani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Dehghan</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2202.13446</idno>
		<title level="m">The unfairness of popularity bias in book recommendation</title>
				<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b11">
	<monogr>
		<author>
			<persName><forename type="first">M</forename><surname>Naghiaei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">A</forename><surname>Rahmani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Dehghan</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2202.13446</idno>
		<title level="m">The unfairness of popularity bias in book recommendation</title>
				<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b12">
	<monogr>
		<title level="m" type="main">Popularity bias in online dating platforms: Theory and empirical evidence</title>
		<author>
			<persName><forename type="first">M</forename><surname>Celdir</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S.-H</forename><surname>Cho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">H</forename><surname>Hwang</surname></persName>
		</author>
		<idno>SSRN 4053204</idno>
		<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
	<note type="report_type">Available at</note>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Fair multi-stakeholder news recommender system with hypergraph ranking</title>
		<author>
			<persName><forename type="first">A</forename><surname>Gharahighehi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Vens</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Pliakos</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Information Processing &amp; Management</title>
		<imprint>
			<biblScope unit="volume">58</biblScope>
			<biblScope unit="page">102663</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Causal intervention for leveraging popularity bias in recommendation</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Feng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>He</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Wei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Song</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Ling</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval</title>
				<meeting>the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="11" to="20" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Model-agnostic counterfactual reasoning for eliminating popularity bias in recommender system</title>
		<author>
			<persName><forename type="first">T</forename><surname>Wei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Feng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Yi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>He</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining</title>
				<meeting>the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="1791" to="1800" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Disentangling user interest and conformity for recommendation with causal embedding</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Gao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>He</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Jin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Web Conference 2021</title>
				<meeting>the Web Conference 2021</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="2980" to="2991" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Deconfounded recommendation for alleviating bias amplification</title>
		<author>
			<persName><forename type="first">W</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Feng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>He</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T.-S</forename><surname>Chua</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining</title>
				<meeting>the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="1717" to="1725" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Mitigating popularity bias in recommendation via counterfactual inference</title>
		<author>
			<persName><forename type="first">M</forename><surname>He</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Hu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Wang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">International Conference on Database Systems for Advanced Applications</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2022">2022</date>
			<biblScope unit="page" from="377" to="388" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">On mitigating popularity bias in recommendations via variational autoencoders</title>
		<author>
			<persName><forename type="first">R</forename><surname>Borges</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Stefanidis</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 36th Annual ACM Symposium on Applied Computing</title>
				<meeting>the 36th Annual ACM Symposium on Applied Computing</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="1383" to="1389" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Fairness aware regularization on a learning-to-rank recommender system for controlling popularity bias in e-commerce domain</title>
		<author>
			<persName><forename type="first">D</forename><surname>Kiswanto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Nurjanah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Rismala</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">2018 International Conference on Information Technology Systems and Innovation (ICITSI), IEEE</title>
				<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="16" to="21" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Controlling popularity bias in learning-to-rank recommendation</title>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Burke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Mobasher</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the eleventh ACM conference on recommender systems</title>
				<meeting>the eleventh ACM conference on recommender systems</meeting>
		<imprint>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="42" to="46" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Popularity-opportunity bias in collaborative filtering</title>
		<author>
			<persName><forename type="first">Z</forename><surname>Zhu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>He</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Caverlee</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 14th ACM International Conference on Web Search and Data Mining</title>
				<meeting>the 14th ACM International Conference on Web Search and Data Mining</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="85" to="93" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">An adversarial approach to improve long-tail performance in neural collaborative filtering</title>
		<author>
			<persName><forename type="first">A</forename><surname>Krishnan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Sharma</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Sankar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Sundaram</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 27th ACM International Conference on information and knowledge management</title>
				<meeting>the 27th ACM International Conference on information and knowledge management</meeting>
		<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="1491" to="1494" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Adaptive alleviation for popularity bias in recommender systems with knowledge graph</title>
		<author>
			<persName><forename type="first">F</forename><surname>Wei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Jin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Zhou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Wu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Security and Communication Networks</title>
		<imprint>
			<biblScope unit="page">2022</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<monogr>
		<title level="m" type="main">An adaptive boosting technique to mitigate popularity bias in recommender system</title>
		<author>
			<persName><forename type="first">A</forename><surname>Gangwar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Jain</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2109.05677</idno>
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">A unified optimization toolbox for solving popularity bias, fairness, and diversity in recommender systems</title>
		<author>
			<persName><forename type="first">S</forename><surname>Seymen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">C</forename><surname>Malthouse</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">MORS@ RecSys</title>
				<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">Managing popularity bias in recommender systems with personalized re-ranking</title>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Burke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Mobasher</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">The thirty-second international flairs conference</title>
				<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">User-centered evaluation of popularity bias in recommender systems</title>
		<author>
			<persName><forename type="first">H</forename><surname>Abdollahpouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mansoury</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Burke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Mobasher</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Malthouse</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization</title>
				<meeting>the 29th ACM Conference on User Modeling, Adaptation and Personalization</meeting>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="119" to="129" />
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
