<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">A Novel Multidimensional Framework for Evaluating Recommender Systems</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Artus</forename><surname>Krohn-Grimberghe</surname></persName>
						</author>
						<author role="corresp">
							<persName><forename type="first">Lars</forename><surname>Schmidt-Thieme</surname></persName>
							<email>schmidt-thieme@ismll.de</email>
						</author>
						<author>
							<affiliation key="aff0">
								<orgName type="department">Information Systems and Machine Learning Lab</orgName>
								<orgName type="institution">University of Hildesheim</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff1">
								<orgName type="department">Machine Learning Lab</orgName>
								<orgName type="laboratory">Alexandros Nanopoulos Information Systems</orgName>
								<orgName type="institution">University of Hildesheim</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff2">
								<orgName type="department">Information Systems and Machine Learning Lab</orgName>
								<orgName type="institution">University of Hildesheim</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff3">
								<orgName type="laboratory">Centric Evaluation of Recommender Systems and Their Interfaces (UCERSTI)</orgName>
								<address>
									<addrLine>Sep 30</addrLine>
									<postCode>2010</postCode>
									<settlement>Barcelona</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff4">
								<orgName type="laboratory">Centric Evaluation of Recommender Systems and Their Interfaces (UCERSTI)</orgName>
								<address>
									<addrLine>Sep 30</addrLine>
									<postCode>2010</postCode>
									<settlement>Barcelona</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff5">
								<orgName type="laboratory">Centric Evaluation of Recommender Systems and Their Interfaces (UCERSTI)</orgName>
								<address>
									<addrLine>Sep 30</addrLine>
									<postCode>2010</postCode>
									<settlement>Barcelona</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff6">
								<orgName type="laboratory">Centric Evaluation of Recommender Systems and Their Interfaces (UCERSTI)</orgName>
								<address>
									<addrLine>Sep 30</addrLine>
									<postCode>2010</postCode>
									<settlement>Barcelona</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff7">
								<orgName type="laboratory">Centric Evaluation of Recommender Systems and Their Interfaces (UCERSTI)</orgName>
								<address>
									<addrLine>Sep 30</addrLine>
									<postCode>2010</postCode>
									<settlement>Barcelona</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">A Novel Multidimensional Framework for Evaluating Recommender Systems</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">2583C795A028AD45CDF908BC88191005</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-23T22:49+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Recommender Systems</term>
					<term>Recommendation</term>
					<term>Multidimensional Analysis</term>
					<term>OLAP</term>
					<term>Exploratory Data Analysis</term>
					<term>Performance Analysis</term>
					<term>Data Warehouse</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>The popularity of recommender systems has led to a large variety of their application. This, however, makes their evaluation a challenging problem, because different and often contrasting criteria are established, such as accuracy, robustness, and scalability. In related research, usually only condensed numeric scores such as RMSE or AUC or F-measure are used for evaluation of an algorithm on a given data set. It is obvious that these scores are insufficient to measure user satisfaction.</p><p>Focussing on the requirements of business and research users, this work proposes a novel, extensible framework for the evaluation of recommender systems. In order to ease user-driven analysis we have chosen a multidimensional approach. The research framework advocates interactive visual analysis, which allows easy refining and reshaping of queries. Integrated actions such as drill-down or slice/dice, enable the user to assess the performance of recommendations in terms of business criteria such as increase in revenue, accuracy, prediction error, coverage and more.</p><p>The ability of the proposed framework to comprise an effective way for evaluating recommender systems in a business-user-centric way is shown by experimental results using a research prototype.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">INTRODUCTION</head><p>The popularity of recommender systems has resulted in a large variety of their applications, ranging from presenting personalized web-search results over identifying preferred multimedia content (movies, songs) to discovering friends in social networking sites. This broad range of applications, however, makes the evaluation of recommender systems a challenging problem. The reason is the different and often contrasting criteria that are being involved in real-world applications of recommender systems, such as their accuracy, robustness, and scalability.</p><p>The vast majority of related research usually evaluates recommender system algorithms with condensed numeric scores: root mean square error (RMSE) or mean absolute error (MAE) for rating prediction, or measures usually stemming from information retrieval such as precision/recall or F-measure for item prediction. Evidently, although such measures can indicate the performance of algorithms regarding some perspectives of recommender systems' applications, they are insufficient to cover the whole spectrum of aspects involved in most real-world applications. As an alternative approach towards characterizing user experience as a whole, several studies employ user-based evaluations. These studies, though, are usually rather costly, difficult in design and implementation.</p><p>More importantly, when recommender systems are deployed in real-world applications, notably e-commerce, their evaluation should be done by business analysts and not necessarily by recommender-system researchers. Thus, the evaluation should be flexible on testing recommender algorithms according to business analysts' needs using interactive queries and parameters. What is, therefore, required is to provide support for evaluation of recommender systems' performance based on popular online analytical processing (OLAP) operations. 
Combined with support for visual analysis, actions such as drill-down or slice/dice, allow assessment of the performance of recommendations in terms of business objectives. For instance, business analysts may want to examine various performance measures at different levels (e.g., hierarchies in categories of recommended products), detect trends in time (e.g., elevation of average product rating following a change in the user interface), or segment the customers and identify the recommendation quality with respect to each customer group. Furthermore, the interactive and visual nature of this process allows easy adaptation of the queries according to insights already gained.</p><p>In this paper, we propose a novel approach to the evaluation of recommender systems. Based on the aforementioned motivation factors, the proposed methodology builds on multidimensional analysis, allowing the consideration of various aspects important for judging the quality of a recommender system in terms of real-world applications. We describe a way for designing and developing the proposed extensible multidimensional framework, and provide insights into its applications. 
This enables integration, combination and comparison of both, the presented and additional, measures (metrics).</p><p>To assess the benefits of the proposed framework, we have implemented a research prototype and now present experimental results that demonstrate its effectiveness.</p><p>Our main contributions are summarized as follows:</p><p>• A flexible multidimensional framework for evaluating recommender systems.</p><p>• A comprehensive procedure for efficient development of the framework in order to support analysis of both, dataset facets and algorithms' performance using interactive OLAP queries (e.g., drill-down, slice, dice).</p><p>• The consideration of an extended set of evaluation measures, compared to standards such as the RMSE.</p><p>• Experimental results with intuitive outcomes based on swift visual analysis.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">RELATED WORK</head><p>For general analysis of recommender systems, Breese <ref type="bibr" target="#b5">[5]</ref> and Herlocker et al. <ref type="bibr" target="#b11">[11]</ref> provide a comprehensive overview of evaluation measures with the aim of establishing comparability between recommender algorithms. Nowadays, the generally employed measures within the prevailing recommender tasks are MAE, (R)MSE, precision, recall, and Fmeasure. In addition further measures including confidence, coverage and diversity related measures are discussed but not yet broadly used. Especially the latter two have attracted attention over the last years as it is still not certain whether today's predictive accuracy or precision and recall related measures correlate directly with interestingness for a system's end users. As such various authors proposed and argued for new evaluation measures <ref type="bibr" target="#b22">[22,</ref><ref type="bibr" target="#b21">21,</ref><ref type="bibr" target="#b6">6]</ref>. Ziegler <ref type="bibr" target="#b22">[22]</ref> has analyzed the effect of diversity with respect to user satisfaction and introduced topic diversification and intra-list similarity as concepts for the recommender system community. Zhang and Hurley <ref type="bibr" target="#b21">[21]</ref> have improved the intra-list similarity and suggested several solution strategies to the diversity problem. Celma and Herrera <ref type="bibr" target="#b6">[6]</ref> have addressed the closely related novelty problem and propose several technical measures for coverage and similarity of item recommendation lists. All these important contributions focus on reporting single aggregate numbers per dataset and algorithm. 
While our framework can deliver those, too, it goes beyond that by its capability of combining the available measures and, most importantly, dissecting them among one or more dimensions.</p><p>Analysis of the end users' response to recommendations and their responses' correlation with the error measures used in research belongs to the field of Human-Recommender Interaction. It is best explored by user studies and large scale experiments, but both are very expensive to obtain and thus rarely conducted and rather small in scale. Select studies are <ref type="bibr" target="#b13">[13,</ref><ref type="bibr" target="#b14">14,</ref><ref type="bibr" target="#b4">4]</ref>. Though in the context of classical information retrieval, Joachims et al <ref type="bibr" target="#b13">[13]</ref> have conducted a highly relevant study on the biasing effect of the position an item has within a ranked list. In the context of implicit feedback vs. explicit feedback Jones et al <ref type="bibr" target="#b14">[14]</ref> have conducted an important experiment on the preferences of users concerning recommendations generated by unobtrusively collected implicit feedback compared to recommendations based on explicitly stated preferences. Bollen et al. <ref type="bibr" target="#b4">[4]</ref> have researched the effect of recommendation list length in combination with recommendation quality on perceived choice satisfaction. They found that for high quality recommendations, longer lists tend to overburden the user with difficult choice decisions. Against the background of those results we believe that for initial research on a dataset, forming an idea, checking if certain effects are present, working on collected data with a framework like the one presented is an acceptable proxy.</p><p>With findings gained in this process, conducting meaningful user studies is an obvious next step.</p><p>Recent interesting findings with respect to dataset characteristics are e.g. 
the results obtained during the Netflix challenge <ref type="bibr" target="#b3">[3,</ref><ref type="bibr" target="#b17">17]</ref> on user and item base-effects and time-effects in data. When modeled appropriately, they have a noteworthy effect on recommender performance. The long time it took to observe these properties of the dataset might be an indicator for the fact that with currently available tools proper analysis of the data at hand is more difficult and tedious than it should be. This motivates the creation of easy-to-use tools enabling thorough analysis of the datasets and the recommender algorithm's results and presenting results in an easy to consume way for the respective analysts.</p><p>Notable work regarding the integration of OLAP and recommender systems stems from the research of Adomavicius et al. <ref type="bibr" target="#b2">[2,</ref><ref type="bibr" target="#b1">1]</ref>. They treat the recommender problem setting with its common dimensions of users, items, and rating as inherently multidimensional. But unlike this work, they focus on the multidimensionality of the generation of recommendations and on the recommenders themselves being multidimensional entities that can be queried like OLAP cubes (with a specifically derived query language, RQL). In contrast, our work acknowledges the multidimensional nature of recommender systems, but focusses on their multidimensional evaluation.</p><p>Existing frameworks for recommender systems analysis usually focus on the automatic selection of one recommendation technique over another. E.g., <ref type="bibr" target="#b10">[10]</ref> is focussed on an API that allows retrieval and derivation of user satisfaction with respect to the recommenders employed. 
The AWESOME system by Thor and Rahm <ref type="bibr" target="#b20">[20]</ref>, the closest approach to that presented here, shares the data warehousing approach, the description of the necessary data preparation (ETL), and the insight of breaking down the measures used for recommender performance analysis by appropriate categories. But contrary to the approach presented here, the AWESOME framework is solely focussed on website performance and relies on static SQL-generated reports and decision criteria. Furthermore, it incorporates no multidimensional approach and does not aim at simplifying end-user-centric analysis or interactive analysis at all.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">FRAMEWORK REQUIREMENTS</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1">The Role of a Multidimensional Model</head><p>Business analysts expect all data of a recommender systems (information about items, generated recommendations, user preferences, etc.) to be organized around business entities in form of dimensions and measures based on a multidimensional model. A multidimensional model enforces structure upon data and expresses relationships between data elements <ref type="bibr" target="#b19">[19]</ref>. Such a model, thus, allows business analysts to investigate all aspects of their recommender system by using the popular OLAP technology <ref type="bibr" target="#b7">[7]</ref>. This technology provides powerful analytical capabilities that business analysts can query to detect trends, patterns and anomalies within the modeled measures of recommender systems' performance across all involved dimensions.</p><p>Multidimensional modeling provides comprehensibility for the business analysts by organizing entities and attributes of their recommender systems in a parent-child relationship (1:N in databases terminology), into dimensions that are identified by a set of attributes. For instance, the dimension of recommended items may have as attributes the name of the product, its type, its brand and category, etc. For the business analyst, the attributes of a dimension represent a specific business view on the facts (or key performance indicators), which are derived from the intersection entities. The attributes of a dimension can be organized in a hierarchical way. For the example of a dimension about the user of the recommender systems, such a hierarchy can result from the geographic location of the user (e.g., address, city, or country). In a multidimensional model, the measures (sometimes called facts) are based in the center with the dimensions surrounding them, which forms the so called star schema that can be easily recognized by the business analysts. 
The star schema of the proposed framework will be analyzed in the following section.</p><p>It is important to notice that aggregated scores, such as the RMSE, are naturally supported. Nevertheless, the power of a multidimensional model resides in adding further derived measures and the capability of breaking all measures down along the dimensions defined in a very intuitive and highly automated way.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">Core Features</head><p>Organizing recommender data in a principled way provides automation and tool support. The presented framework enables analysis of all common recommender datasets. It supports both rating prediction and item recommendation scenarios. Besides that, data from other application sources can and should be integrated for enriched analysis capabilities. Notable sources are ERP systems, eCommerce systems and experimentation platform systems employing recommender systems. Their integration leverages analysis of the recommender data by the information available within the application (e.g., recommender performance given the respective website layouts) and also analysis of the application data by recommender information (e.g., revenue by recommender algorithm).</p><p>Compared to RMSE, MAE, precision, recall, and F-measure, more information can be obtained with this framework as, first, additional measures e.g. for coverage, novelty, diversity analysis are easily integrated and thus available for all datasets. Second, all measures are enhanced by the respective ranks, (running) differences, (running) percentages, totals, standard deviations and more.</p><p>While a single numerical score assigned to each recommender algorithm's predictions is crucial for determining winners in challenges or when choosing which algorithm to deploy <ref type="bibr" target="#b8">[8]</ref>, from a business insight point of view a lot of interesting information is forgone this way. Relationships between aspects of the data and their influence on the measure may be hidden. One such may be deteriorating increase in algorithmic performance with respect to an increasing number of ratings available per item, another the development of the average rating over the lifetime of an item in the product catalog. 
A key capability of this framework is exposing intuitive ways for analyzing the above measures by other measures or related dimensions.</p><p>From a usability point of view, this framework contributes convenient visual analysis empowering drag-drop analysis and interactive behavior. Furthermore, convenient visual presentation of the obtained results is integrated from the start as any standard conforming client can handle it. Manual querying is still possible as is extending the capabilities of the framework with custom measures, dimensions, or functions and post-processing of received results in other applications. Inspection of the original source data is possible via custom actions which allow the retrieval of the source rows that produced the respective result. Last but not least, aggregations allow for very fast analysis of very large datasets, compared to other tools.</p><p>The following section elaborates on the architecture of the multidimensional model that is used by the proposed framework, by providing its dimensions and measures.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">THE ARCHITECTURE OF THE MULTI-DIMENSIONAL FRAMEWORK</head><p>Figure <ref type="figure" target="#fig_0">1</ref> gives an overview of the architecture of the framework. The source data and the extract-transform-load (ETL) process cleaning it and moving it into the data store are located at the bottom of the framework. The middle tier stores the collected information in a data warehouse manner regarding facts (dashed boxes in the center) and dimensions (surrounding the facts). The multidimensional cubes (for rating recommendation and item prediction) sitting on top of the data store provide access to an extended set of measures (derived from the facts in the warehouse) that allow automatic navigation along their dimensions and interaction with other measures.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">The Data Flow</head><p>The data gathered for analysis can be roughly divided into two categories:</p><p>Core data: consisting of the algorithms' training data, such as past ratings, purchase transaction information, online click streams, audio listening data, ... and the persisted algorithms' predictions.</p><p>Increase-insight data: can be used as a means to leverage the analytic power of the framework. It consists roughly of user master data, item master data, user transactional statistics, and item transactional statistics. This data basically captures the metadata and usage statistics data not directly employed by current recommender algorithms (such as demographic data, geographic data, customer performance data. . . ).</p><p>In case of recommender algorithms employed in production environments, relational databases housing the transactional system (maybe driving an e-commerce system like an ERP system or an online shop) will store rich business master data such as item and user demographic information, lifetime information and more, next to rating information, purchase information, and algorithm predictions. In case of scientific applications, different text files containing e.g. rating information, implicit feedback, and the respective user and item attributes for training and the algorithms' predictions are the traditional source of the data.</p><p>From the respective source, the master data, the transactional data, and the algorithm predictions are cleaned, transformed, and subsequently imported into a data warehouse. Referential integrity between the elements is maintained, so that e.g. ratings to items not existing in the system are impossible. 
Incongruent data is spotted during insert into the recommender warehouse and presented to the data expert.</p><p>Inside the framework, the data is logically split into two categories: measures (facts) that form the numeric information for analysis, and dimensions that form the axes of analysis for the related measures. In the framework schema (figure <ref type="figure" target="#fig_0">1</ref>), the measures are stylized within the dashed boxes. The dimensions surrounding them and are connected to both, the rating prediction and the item recommendation measures.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">The Measures</head><p>Both groups of measures analyzed by the framework—the measures for item recommendation algorithms and the measures for rating prediction algorithms—can be divided into basic statistical and information retrieval measures.</p><p>Statistical measures: Among the basic statistical measures are counts and distinct counts, ranks, (running) differences and (running) percentages of various totals for each dimension table, train ratings, test ratings and predicted ratings; furthermore, averages and their standard deviations for the lifetime analysis, train ratings, test ratings, and predicted ratings.</p><p>Information retrieval measures: Among the information retrieval measures are the popular MAE and (R)MSE for rating prediction, plus user-wise and item-wise aggregated precision, recall and F-measure for item prediction. Novelty, diversity, and coverage measures are also included as they provide additional insight. Furthermore, for comparative analysis, the differences in the measures between any two chosen (groups of) prediction methods are supported as additional measures.</p><p>In case a recommender system and thus this framework is accompanied by a commercial or scientific application, this application usually will have measures of its own. These measures can easily be integrated into the analysis. An example may be an eCommerce application adding sales measures such as gross revenue to the framework. These external measures can interact with the measures and the dimension of the framework. 1   1 E.g., the revenue could be split up by year and recommen-</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3">The Dimensions</head><p>The dimensions are used for slicing and dicing the selected measures and for drilling down from global aggregates to fine granular values. For our framework, the dimensions depicted in figure <ref type="figure" target="#fig_0">1</ref> are described below.</p><p>Age: The Age dimension is used for item and user lifetime analysis. Age refers to the relative age of the user or item at the time the rating is given/received or an item from a recommendation list is put into a shopping basket and allows for analysis of trends in relative time (c.f. section 6).</p><p>User : User and the related dimensions such as UserProfile and UserDemographics allow for analysis by user master data and by using dynamically derived information such as activity related attributes. This enables grouping of the users and content generated by them (purchase histories, ratings) by information such as # of ratings or purchases, # of days of activity, gender, geography...</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Item:</head><p>Item and the related dimensions such as ItemCategory and ItemComponent parallel the user-dimensions. In a movie dataset, the item components could be, e.g., actors, directors, and other credits.</p><p>Prediction Method : The Prediction Method dimension allows the OLAP user to investigate the effects of the various classes and types of recommender systems and their respective parameters. Hierarchies, such as Recommender Class, Recommender Type, Recommender Parameters, simplify the navigation of the data.</p><p>eCommerce: As recommender algorithms usually accompany a commercial or scientific application (e.g., eCommerce) having dimensions of its own, these dimensions can easily be integrated into and be used by our framework.</p><p>Experimentation: In case this framework is used in an experiment-driven scenario <ref type="bibr" target="#b8">[8]</ref>, such as an online or marketing setting, Experimentation related dimensions should be used. They parallel the PredictionMethod dimension, but are more specific to their usage scenario.</p><p>dation method, showing the business impact of a recommender.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">PROTOTYPE DESCRIPTION</head><p>This section describes the implementation of a research prototype for the proposed framework. The prototype was implemented using Microsoft SQL Server 2008 <ref type="bibr" target="#b18">[18]</ref> and was used later for our performance evaluation.</p><p>In our evaluation, the prototype considers the Movielens 1m dataset <ref type="bibr">[9]</ref>, which is a common benchmark for recommender systems. It consists of 6.040 users, 3.883 items, and 1.000.209 ratings received over roughly three years. Each user has at least 20 ratings and the metadata supplied for the users is userId, gender, age bucket, occupation, and zipcode. Metadata for the item is movieId, title and genre information.</p><p>Following a classical data warehouse approach <ref type="bibr" target="#b15">[15,</ref><ref type="bibr" target="#b12">12]</ref>, the database tables are divided into dimension and fact tables. The dimension tables generally consist of two kinds of information: static master data and dynamic metadata. The static master data usually originates from an ERP system or another authoritative source and contains e.g. naming information. The dynamic metadata is derived information interesting for evaluation purposes, such as numbers of ratings given or time spent on the system. To allow for an always up to date and rich information at the same time, we follow the approach of using base tables for dimension master data and views for dynamic metadata derived through various calculations. Further views then expose the combined information as pseudo table. The tables used in the warehouse of the prototype are Date, Time, Genre (instantiation of Category), Item, ItemGenre (table needed for mapping items and genres), Numbers (a helper table), Occupation, PredictedRatings, PredictedItems, PredictionMethod, TestRatings, TestItems, TrainRatings, TrainItems, and User. 
The Item and User table are in fact views over the master data provided with the Movielens dataset and dynamic information gathered from usage data. Further views are SquareError, UserwiseFMeasure, AllRatings, and AgeAnalysis.</p><p>On top of the warehouse prototype, an OLAP cube for rating prediction was created using Microsoft SQL Server Analysis Services. Within this cube, the respective measures were created: counts and sums, and further derived measures such as distinct counts, averages, standard deviations, ranks, (running) differences and (running) percentage. The core measures RMSE and MAE are derived from the error between predicted and actual ratings. The most important OLAP task with respect to framework development is to define the relationships between the measures and dimensions, as several dimensions are linked multiple times (e.g. the Age dimension is role-playing as it is linked against both item age and user age) or only indirect relationships exist (such as between category and rating the relationship is only established via item). Designing the relationships has to be exercised very carefully, as both correctness of the model and the ability to programmatically navigate dimensions and measures (adding them on the report axes, measure field or as filters) depend on this step. Linking members enables generic dimensions such as Prediction Method A, and Prediction Method B, that can be linked to chosen dimension members. This renders unnecessary the creation of the n(n − 1)/2 possible measures yielding differences between any two prediction methods A and B (for, say, RMSE or F-measure). Furthermore, this approach allows choosing more than one dimension member, e.g. several runs of one algorithm with different parameters, as one linked member for aggregate analysis.</p><p>Before we go on to the evaluation of our prototype, let us state that our framework describes more than simply a model for designing evaluation frameworks. 
The prototype serves well as a template for other recommender datasets, too. With nothing changed besides the data load procedure, it can be used directly for, e.g., the other Movielens datasets, the Netflix challenge dataset or the Eachmovie dataset. Additional data available in those datasets (e.g. the tagging information from the Movielens 10m dataset) are either ignored or require an extension of the data warehouse and the multidimensional model (resulting in new analysis possibilities).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.">PERFORMANCE EVALUATION</head><p>In the previous section we have described the implementation of a research prototype of the proposed framework using the Movielens 1m dataset. Building on this prototype, we proceed with presenting a set of results that are obtained by applying it.</p><p>We have to clarify that the objective of our experimental evaluation is not limited to the comparison of specific recommender algorithms, as it is mostly performed in works that propose such algorithms. Our focus is, instead, on demonstrating the flexibility and ease with which we can answer important questions for the performance of recommendations. It is generally agreed that it is beneficial to explicitly model the effects describing changes in the rating behavior over the various users (user base-effect), items (item base-effect), and the age of the respective item or user (time effects) <ref type="bibr" target="#b3">[3,</ref><ref type="bibr" target="#b16">16,</ref><ref type="bibr" target="#b17">17]</ref>. For this reason, we choose to demonstrate the benefits of the proposed framework by setting our scope on those effects followed by exemplarily dissecting the performance of two widely examined classes of recommender algorithms, i.e., collaborative filtering and matrix factorization. We also consider important the exploratory analysis of items and users, which can provide valuable insights for business analysts about factors determining the performance of their recommender systems. 
We believe that the results presented in the following demonstrate how easy it is to obtain them by using the proposed framework, which favors its usage in real-world applications, but also can provide valuable conclusions to motivate the usage of the framework for pure research purposes, since it allows for observing and analyzing the performance by combining all related dimensions that are being modeled.</p><p>All results presented in the remainder of this section could easily be obtained graphically by navigating the presented measures and dimensions using Excel 2007 as a multidimensional client.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.1">Exploratory Data Analysis</head><p>Using the framework, the first step for a research and a business analytics approach is exploring the data. As an example, the Calendar dimension (Date) is used to slice the average rating measure. Figure <ref type="figure">2</ref> presents this as pivot chart. The sharp slumps noticeable in March and August 2002 together with a general lack of smoothness beyond mid 2001 arouse curiosity and suggest replacing average rating by rating count (figure not shown). Changing from counts to running percentages proves that about 50 percent of the ratings in this dataset are spent within the first six months out of nearly three years. Within two more months 90 percent of the ratings are assigned, roughly seven percent of the data for 50 percent of the time (figure <ref type="figure">3</ref>).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.1.1">Item Analysis</head><p>The framework allows an easy visualization of the item effect described e.g. in <ref type="bibr" target="#b16">[16]</ref>, namely that there usually is a systematic variation of the average rating per item. Additionally, other factors can easily be integrated in such an analysis. Figure <ref type="figure">4</ref> shows the number of ratings received per item sorted by decreasing average rating. This underlines the need for regularization when using averages, as the movies rated highest only received a vanishing number of ratings.</p><p>Moving on the x-axis from single items to rating count buckets containing a roughly equal number of items, a trend of heavier rated items being rated higher can be observed (figure omitted for space reasons). A possible explanation might be that blockbuster movies accumulate a huge number of generally positive ratings during a short time and the all-time classics earn a slow but steady share of additional coverage. That all-time classics receive higher ratings can nicely be proved with the framework, too. Consistent with findings during the final phase of the Netflix competition by Koren <ref type="bibr" target="#b17">[17]</ref>, figure <ref type="figure">5</ref> shows a justification for the good results obtained by adding time-variant base effects to recommender algorithms. Besides the all-time classics effect, the blockbuster effect can also be observed (figure <ref type="figure" target="#fig_2">6</ref>), showing that items that receive numerous ratings per day on average also have a higher rating. Slicing the average rating by Genre shows a variation among the different genres, with Film-Noir being rated best (average rating 4.07, 1.83% of ratings received), and Horror being rated worst (3.21, 7.64%). Of the Genres with at least ten percent of the ratings received Drama scores highest (3.76, 34.45%) and Sci-Fi lowest (3.46, 15.73%). 
Figure not shown.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.1.2">User Analysis</head><p>The user effect can be analyzed just as easily as the item effect. Reproducing the analysis explained above on the users, it is interesting to notice that for heavy raters the user rating count effect is inverse to the item rating count effect described above (figure <ref type="figure" target="#fig_3">7</ref>): the higher the amount of ratings spent by a given user, the lower his or her average rating. One explanation for this behavior might be that real heavy raters encounter a lot of rather trashy or at least low quality movies.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.2">Recommender Model Diagnostics</head><p>For algorithm performance comparison, the Movielens 1m ratings were randomly split into two nearly equal size partitions, one for training (500103 ratings), and one for testing (500104 ratings). Algorithm parameter estimation was conducted on the training samples only; predictions were conducted solely on the test partition. Exemplarily, a vanilla matrix factorization (20 features, regularization 0.09, learn rate 0.01, 56 iterations, hyperparameters optimized by 5-fold cross-validation) is analyzed. 2  For a researcher the general aim will be to improve the overall RMSE or F-Measure, depending on the task, as this is usually what wins a challenge or raises the bar on a given dataset. For a business analyst this is not necessarily the case. A business user might be interested in breaking down the algorithm's RMSE over categories or top items or top users as this may be relevant information from a monetary aspect. The results of the respective queries may well lead to one algorithm being replaced by another on a certain part of the dataset (e.g. subset of the product hierarchy).</p><p>In figure <ref type="figure">8</ref>, RMSE is plotted vs. item rating count in train. This indicates that more ratings on an item do help factor models. 
Interpreted the other way around, for a business user, this implies that this matrix factorization yields best performance on the items most crucial to him from a top sales point of view (though for slow sellers other algorithms might be more helpful).</p><p>The same trend can be spotted when RMSE is analyzed by user rating count on the training set (figure omitted for space reasons), though the shape of the curve follows a straighter line than for the item train rating count (where it follows more an exponential decay).</p><p>Due to the approach taken in the design of the OLAP cube the number of recommender algorithms comparable as A and B is not limited; neither does it have to be exactly one algorithm being compared with exactly one other, as multiple selection is possible. (Footnote 2: The matrix factorization yielded an RMSE of 0.8831 given the presented train-test split.) Furthermore—given the predictions are already in the warehouse—replacing one method by another or grouping several methods as A or B can nicely be achieved by selecting them in the appropriate drop-down list. Exemplarily, the matrix factorization analyzed above is compared to the global average of ratings as baseline recommendation method. Figure <ref type="figure" target="#fig_5">9</ref> reveals that for this factor model more ratings on train do increase the relative performance, as expected, up to a point from which the static baseline method will gain back roughly half the lost ground. Investigation of this issue might be interesting for future recommender models.</p><p>All results presented could be obtained very fast: when judging the time needed to design query and report (chart)—which was on average seconds for construction of the query and making the chart look nice—and when judging execution time—which was in the sub-second timeframe.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="7.">CONCLUSIONS</head><p>We have proposed a novel multidimensional framework for integrating OLAP with the challenging task of evaluating recommender systems. We have presented the architecture of the framework as a template and described the implementation of a research prototype. Consistent with the other papers at this workshop, the authors of this work believe that the perceived value of a system largely depends on its user interface. Thus, this work provides an easy to use framework supporting visual analysis. Our evaluation demonstrates, too, some of the elegance of obtaining observations with the proposed framework. Besides showing the validity of findings during the recent Netflix prize on another dataset, we could provide new insights, too. With respect to the recommender performance evaluation and the validity of RMSE as an evaluation metric, it would be interesting to see if a significant difference in RMSE concerning the amount of ratings present in the training set would also lead to significant effects in a related user study.</p><p>In our future work, we will consider the extension of our research prototype and develop a web-based implementation that will promote its usage.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: The recommender evaluation framework.The dimensions specified are connected with both fact table groups (dashed boxes in the center) and are thus available in both resulting cubes. End users can connect to the Rating Prediction and Item Recommendation cubes.</figDesc><graphic coords="3,53.80,53.80,239.10,143.46" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 4 :Figure 5 :</head><label>45</label><figDesc>Figure 4: Item rating count sorted by decreasing average rating</figDesc><graphic coords="6,53.80,613.84,240.94,90.27" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 6 :</head><label>6</label><figDesc>Figure 6: The blockbuster effect. Increasing average item rating with increasing number of ratings received per day.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 7 :</head><label>7</label><figDesc>Figure 7: The effect of the number of ratings per user on the average rating</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 9 :</head><label>9</label><figDesc>Figure 9: Difference in RMSE between Matrix Factorization (MF) and Global Average (GA) vs. ratings available per item on the train dataset.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head></head><label></label><figDesc>are:</figDesc><table><row><cell>Date: The Date dimension is one of the core dimensions</cell></row><row><cell>for temporal analysis. It consists of standard mem-</cell></row><row><cell>bers such as Year, Quarter, Month, Week, Day and</cell></row><row><cell>the respective hierarchies made up from those mem-</cell></row><row><cell>bers. Furthermore, Year-to-date (YTD) and Quar-</cell></row><row><cell>ter/Month/Week/Day of Year logic provides options</cell></row><row><cell>such as searching for a Christmas or Academy Awards</cell></row><row><cell>related effect.</cell></row><row><cell>Time: The Time dimension offers Hour of Day and Minute</cell></row><row><cell>of Day/Hour analysis. For international datasets this</cell></row><row><cell>dimension profits from data being normalized to the</cell></row><row><cell>time zone of the creator (meaning the user giving the</cell></row><row><cell>rating).</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head></head><label></label><figDesc>Item rating count effect on a factor model. Buckets created on roughly equal item count.</figDesc><table><row><cell>1.8</cell><cell></cell></row><row><cell>1.6</cell><cell></cell></row><row><cell>RMSE</cell><cell></cell></row><row><cell cols="2">0 -0 1 -1 2 -4 5 -8 9 -12 13 -16 17 -21 22 -27 28 -34 35 -43 44 -53 54 -66 67 -80 81 -95 96 -114 115 -136 137 -165 166 -196 197 -241 242 -304 306 -388 389 -551 552 -1715</cell></row><row><cell></cell><cell>Item Train Rating Count</cell></row><row><cell>0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 0.45 Figure 8: -0.1 RMSE Difference between GA and MF -0.05</cell><cell>0 -0 1 -1 2 -4 5 -8 9 -12 13 -16 17 -21 22 -27 28 -34 35 -43 44 -53 54 -66 67 -80 81 -95 96 -114 115 -136 137 -165 166 -196 197 -241 242 -304 306 -388 389 -551 552 -1715 Item Rating Count on Train</cell></row></table></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" xml:id="foot_0">Proceedings of the ACM RecSys 2010 Workshop on User-Centric Evaluation of Recommender Systems and Their Interfaces (UCERSTI), Barcelona, Spain, Sep 30, 2010 Published by CEUR-WS.org, ISSN 1613-0073, online ceur-ws.org/Vol-612/paper6.pdf</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="8.">ACKNOWLEDGMENTS</head><p>The authors gratefully acknowledge the co-funding of their work through the European Commission FP7 project My-Media (grant agreement no. 215006) and through the European Regional Development Fund project LEFOS (grant agreement no. 80028934).</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<monogr>
		<title/>
		<author>
			<persName><surname>References</surname></persName>
		</author>
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Incorporating contextual information in recommender systems using a multidimensional approach</title>
		<author>
			<persName><forename type="first">G</forename><surname>Adomavicius</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sankaranarayanan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Sen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Tuzhilin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ACM Trans. Inf. Syst</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="103" to="145" />
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Multidimensional recommender systems: A data warehousing approach</title>
		<author>
			<persName><forename type="first">G</forename><surname>Adomavicius</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Tuzhilin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">WELCOM &apos;01: Proceedings of the Second International Workshop on Electronic Commerce</title>
				<meeting><address><addrLine>London, UK</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2001">2001</date>
			<biblScope unit="page" from="180" to="192" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Modeling relationships at multiple scales to improve accuracy of large recommender systems</title>
		<author>
			<persName><forename type="first">R</forename><surname>Bell</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Koren</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Volinsky</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">KDD &apos;07: Proceedings of the 13th ACM SIGKDD international conference on Knowledge discovery and data mining</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2007">2007</date>
			<biblScope unit="page" from="95" to="104" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Understanding choice overload in recommender systems</title>
		<author>
			<persName><forename type="first">D</forename><surname>Bollen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">P</forename><surname>Knijnenburg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">C</forename><surname>Willemsen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Graus</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">RecSys &apos;10: Proceedings of the 2010 ACM conference on Recommender systems</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<monogr>
		<title level="m" type="main">Empirical analysis of predictive algorithms for collaborative filtering</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Breese</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Heckerman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Kadie</surname></persName>
		</author>
		<idno>MSR-TR-98-12</idno>
		<imprint>
			<date type="published" when="1998">1998</date>
			<publisher>Morgan Kaufmann</publisher>
			<biblScope unit="page" from="43" to="52" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">A new approach to evaluating novel recommendations</title>
		<author>
			<persName><forename type="first">O</forename><surname>Celma</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Herrera</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">RecSys &apos;08: Proceedings of the 2008 ACM conference on Recommender systems</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="179" to="186" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<monogr>
		<title level="m" type="main">Providing OLAP to user-analysts: An it mandate</title>
		<author>
			<persName><forename type="first">E</forename><surname>Codd</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Codd</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Salley</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1993">1993</date>
			<pubPlace>Ann Arbor,MI</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Seven pitfalls to avoid when running controlled experiments on the web</title>
		<author>
			<persName><forename type="first">T</forename><surname>Crook</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Frasca</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Kohavi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Longbotham</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">KDD &apos;09: Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2009">2009</date>
			<biblScope unit="page" from="1105" to="1114" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<monogr>
		<ptr target="http://www.grouplens.org/node/73" />
		<title level="m">Movielens data sets</title>
				<imprint/>
		<respStmt>
			<orgName>GroupLens</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">An on-line evaluation framework for recommender systems</title>
		<author>
			<persName><forename type="first">C</forename><surname>Hayes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Massa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Avesani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Cunningham</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop on Personalization and Recommendation in E-Commerce</title>
				<meeting><address><addrLine>Malaga</addrLine></address></meeting>
		<imprint>
			<publisher>Springer Verlag</publisher>
			<date type="published" when="2002">2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Evaluating collaborative filtering recommender systems</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">L</forename><surname>Herlocker</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Konstan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">G</forename><surname>Terveen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">T</forename><surname>Riedl</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ACM Trans. Inf. Syst</title>
		<imprint>
			<biblScope unit="volume">22</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="5" to="53" />
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<monogr>
		<title level="m" type="main">Building the Data Warehouse</title>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">H</forename><surname>Inmon</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2005">2005</date>
			<publisher>Wiley</publisher>
		</imprint>
	</monogr>
	<note>4th ed.</note>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Accurately interpreting clickthrough data as implicit feedback</title>
		<author>
			<persName><forename type="first">T</forename><surname>Joachims</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Granka</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Pan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Hembrooke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Gay</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">SIGIR &apos;05: Proceedings of the 28th annual international ACM SIGIR conference on Research and development in information retrieval</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="154" to="161" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">How users perceive and appraise personalized recommendations</title>
		<author>
			<persName><forename type="first">N</forename><surname>Jones</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Pu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Chen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">UMAP &apos;09: Proceedings of the 17th International Conference on User Modeling, Adaptation, and Personalization</title>
				<meeting><address><addrLine>Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2009">2009</date>
			<biblScope unit="page" from="461" to="466" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<monogr>
		<title level="m" type="main">The Data Warehouse Toolkit: The Complete Guide to Dimensional Modeling</title>
		<author>
			<persName><forename type="first">R</forename><surname>Kimball</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ross</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2002">2002</date>
			<publisher>Wiley</publisher>
		</imprint>
	</monogr>
	<note>2nd ed</note>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Factorization meets the neighborhood: a multifaceted collaborative filtering model</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Koren</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">KDD &apos;08: Proceeding of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="426" to="434" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Collaborative filtering with temporal dynamics</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Koren</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">KDD &apos;09: Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2009">2009</date>
			<biblScope unit="page" from="447" to="456" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<monogr>
		<title level="m" type="main">Microsoft SQL Server</title>
		<ptr target="http://www.microsoft.com/sqlserver/2008/" />
		<imprint>
			<date type="published" when="2008">2008</date>
		</imprint>
		<respStmt>
			<orgName>Microsoft</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<monogr>
		<title level="m" type="main">Management Information Systems</title>
		<author>
			<persName><forename type="first">J</forename><surname>O'brien</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Marakas</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2009">2009</date>
			<publisher>McGraw-Hill/Irwin</publisher>
		</imprint>
	</monogr>
	<note>9th ed.</note>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Awesome: a data warehouse-based system for adaptive website recommendations</title>
		<author>
			<persName><forename type="first">A</forename><surname>Thor</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Rahm</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">VLDB &apos;04: Proceedings of the Thirtieth international conference on Very large data bases</title>
				<imprint>
			<publisher>VLDB Endowment</publisher>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="384" to="395" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Avoiding monotony: improving the diversity of recommendation lists</title>
		<author>
			<persName><forename type="first">M</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Hurley</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">RecSys &apos;08: Proceedings of the 2008 ACM conference on Recommender systems</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM Press</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="123" to="130" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Improving recommendation lists through topic diversification</title>
		<author>
			<persName><forename type="first">C.-N</forename><surname>Ziegler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">M</forename><surname>Mcnee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Konstan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Lausen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">WWW &apos;05: Proceedings of the 14th international conference on World Wide Web</title>
				<meeting><address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="22" to="32" />
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
