<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">RuleMiner: An Interactive Web Tool for Rule-Based Data Analysis</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Marek</forename><surname>Sikora</surname></persName>
							<email>marek.sikora@polsl.pl</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Łukasiewicz Research Network</orgName>
								<orgName type="department" key="dep2">Institute of Innovative Technologies EMAG</orgName>
								<address>
									<addrLine>ul. Leopolda 31</addrLine>
									<postCode>40-189</postCode>
									<settlement>Katowice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="department">Department of Computer Networks and Systems</orgName>
								<orgName type="institution">Silesian University of Technology</orgName>
								<address>
									<addrLine>ul. Akademicka 16</addrLine>
									<postCode>44-100</postCode>
									<settlement>Gliwice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Dawid</forename><surname>Macha</surname></persName>
							<email>dawid.macha@emag.lukasiewicz.gov.pl</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Łukasiewicz Research Network</orgName>
								<orgName type="department" key="dep2">Institute of Innovative Technologies EMAG</orgName>
								<address>
									<addrLine>ul. Leopolda 31</addrLine>
									<postCode>40-189</postCode>
									<settlement>Katowice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="department">Department of Computer Networks and Systems</orgName>
								<orgName type="institution">Silesian University of Technology</orgName>
								<address>
									<addrLine>ul. Akademicka 16</addrLine>
									<postCode>44-100</postCode>
									<settlement>Gliwice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Joanna</forename><surname>Badura</surname></persName>
							<email>joanna.badura@emag.lukasiewicz.gov.pl</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Łukasiewicz Research Network</orgName>
								<orgName type="department" key="dep2">Institute of Innovative Technologies EMAG</orgName>
								<address>
									<addrLine>ul. Leopolda 31</addrLine>
									<postCode>40-189</postCode>
									<settlement>Katowice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Artur</forename><surname>Kozłowski</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Łukasiewicz Research Network</orgName>
								<orgName type="department" key="dep2">Institute of Innovative Technologies EMAG</orgName>
								<address>
									<addrLine>ul. Leopolda 31</addrLine>
									<postCode>40-189</postCode>
									<settlement>Katowice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Łukasz</forename><surname>Wróbel</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Łukasiewicz Research Network</orgName>
								<orgName type="department" key="dep2">Institute of Innovative Technologies EMAG</orgName>
								<address>
									<addrLine>ul. Leopolda 31</addrLine>
									<postCode>40-189</postCode>
									<settlement>Katowice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="department">Department of Computer Networks and Systems</orgName>
								<orgName type="institution">Silesian University of Technology</orgName>
								<address>
									<addrLine>ul. Akademicka 16</addrLine>
									<postCode>44-100</postCode>
									<settlement>Gliwice</settlement>
									<country key="PL">Poland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff2">
								<address>
									<settlement>Bucharest</settlement>
									<country key="RO">Romania</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">RuleMiner: An Interactive Web Tool for Rule-Based Data Analysis</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">440E18E622516269C1771DF50DC22DBB</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T18:27+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>rule induction</term>
					<term>data mining application</term>
					<term>RuleKit</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Rule induction is a powerful tool for data mining. It can be used to create understandable prediction models that are presented as a set of rules. Because of their interpretability, they can be used and understood by a wide range of users -from data science experts to professionals in other fields who want to analyse their data. For this reason, it is beneficial to offer systems for rule induction with a graphical user interface (GUI) that would be usable also for users without programming skills. This article proposes a RuleMiner -a new web service for rule induction and rule-based data analysis. RuleMiner application is a development of the original RuleKit library co-developed by the authors. The platform allows users to run RuleKit algorithms for classification, regression, and survival rules at the GUI level without requiring programming knowledge. It also offers the functionalities to work with sets of rules and datasets, visualise the results, perform user-driven rule induction, analyse the created models, explore the coverage of individual rules and examples, analyse models' predictive capabilities, and perform predictions for new examples. The RuleMiner platform is publicly available. The article presents the related work, shows RuleKit comparison with selected algorithms, highlights the most important RuleMiner functionalities, and provides an illustrative example of the proposed system's usage.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>The article presents a RuleMiner web service for rule induction and rule-based data analysis as the development of the RuleKit library <ref type="bibr" target="#b0">[1]</ref>. The RuleKit includes classification, regression, and survival rule induction algorithms developed by our team. Based on this library, we have also developed algorithms for action rule induction <ref type="bibr" target="#b1">[2]</ref>, rule-based expalinability <ref type="bibr" target="#b2">[3]</ref> and user-driven rule induction <ref type="bibr" target="#b3">[4]</ref>.</p><p>Until now, the RuleKit algorithms were available only to people who were proficient in programming, or it required our involvement in research, e.g. as in <ref type="bibr" target="#b4">[5]</ref>. Thus, the motivation behind creating the RuleMiner application was to share a wide range of tools for knowledge discovery, data mining, and generating rule-based prediction systems accessible to a broad audience. We are particularly interested in making the RuleMiner application beneficial to people from various scientific fields who want to use rule systems to analyse their own datasets. RuleMiner allows users to run RuleKit algorithms from the GUI level (thus without programming knowledge) and analyse quantitative results in a user-friendly form. It also allows users to work with rulesets and datasets -the user can edit them, filter them, run user-driven induction, visualise results, analyse coverage of individual rules and examples, and more.</p><p>In this article, we show the effectiveness of our computation kernel (RuleKit) and compare it with other methods. The article also presents the most important functionalities and shows an illustrative example of system usage. All information about the proposed application can be found on the RuleMiner website at https://ruleminer.ai/ and on our GitHub page at https://github.com/ruleminer/ruleminer. 
The RuleMiner application is available at https://app.ruleminer.ai/. Additional research results can be found in the report available also at our GitHub repository <ref type="foot" target="#foot_0">1</ref> . In accordance with good research practice, we have also made public the detailed numerical results of our research<ref type="foot" target="#foot_1">2</ref> and the datasets <ref type="foot" target="#foot_2">3</ref> .</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Related work</head><p>Rule induction is used for tasks of knowledge discovery and, because of its interpretability, for explainability (XAI) tasks <ref type="bibr" target="#b5">[6]</ref>. There are multiple approaches to rule induction. A popular one is the separate-and-conquer strategy (SoC), also known as the sequential covering approach. This strategy was proposed by Michalski <ref type="bibr" target="#b6">[7]</ref> and includes methods for generating a ruleset or a rules list. An extensive discussion of the covering approach and a review of SoC-based rule induction algorithms was presented by Funkranz <ref type="bibr" target="#b7">[8]</ref> and can be found in book <ref type="bibr" target="#b8">[9]</ref>. SoC is still used to define new rule induction algorithms, particularly in regression problems <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b9">10]</ref>, action rule induction <ref type="bibr" target="#b1">[2]</ref>, and survival rule induction <ref type="bibr" target="#b10">[11]</ref>. Related approaches are methods that connect separate-and-conquer strategy with rough set theory (RST) <ref type="bibr" target="#b11">[12]</ref>. There are also methods that look for locally optimal rules for a specific observation from a training dataset <ref type="bibr" target="#b12">[13,</ref><ref type="bibr" target="#b13">14]</ref>. The next group of algorithms are the methods that are based on ensemble learning <ref type="bibr" target="#b14">[15]</ref>. They can use boosting <ref type="bibr" target="#b15">[16,</ref><ref type="bibr" target="#b16">17]</ref> or bagging <ref type="bibr" target="#b17">[18]</ref> techniques. Rule induction can also involve optimising the entire ruleset based on a given loss function <ref type="bibr" target="#b18">[19,</ref><ref type="bibr" target="#b19">20]</ref>. 
The literature also describes simpler examples of rule inductions, e.g., the OneR algorithm <ref type="bibr" target="#b20">[21]</ref>, that generates a set of rules that test only one particular attribute. Some of the mentioned methods are suitable for solving both classification and, with some adjustments, regression problems.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.">Algorithms and implementations</head><p>There are numerous implementations of rule-based algorithms and data mining programming libraries with rule-based methods. The given section presents a selection of them to demonstrate the currently available tools.</p><p>Rule induction can involve different types of rules, but the most common ones are association rules and rules with conclusions created based on one attribute. In the latter case, we can generate classification rules (that is, decision rules) or regression rules. It is also possible to create survival rules that, in conclusion, have an estimator of the survival function.</p><p>The association rules can be generated in numerous software, e.g. in Orange library <ref type="bibr" target="#b21">[22]</ref> or Weka package <ref type="bibr" target="#b22">[23]</ref>. These libraries are general data mining purpose libraries, and the association rules are only one of many available algorithms. The association rules can also be generated by programming libraries e.g. Python apyori library (https://github.com/ymoch/apyori, last accessed 2024/06/07) or R package arules <ref type="bibr" target="#b23">[24]</ref>.</p><p>The second group of software includes tools that implement algorithms for generating classification, regression, or survival rules. Below, we present well-recognised algorithms and the methods that we used in experiments, which will be presented in further sections of the article. 
The exemplary algorithms for rule induction and their implementations are as follows:</p><p>• AQ15 classification algorithm that is available in Rseslib 3 library <ref type="bibr" target="#b24">[25]</ref>.</p><p>• BOOMER <ref type="bibr" target="#b25">[26]</ref> algorithm for learning ensembles of gradient-boosted multi-label classification rules.</p><p>• BRCG <ref type="bibr" target="#b18">[19]</ref> algorithm for classification rule induction for binary datasets, with implementation available in AIX360 package <ref type="bibr" target="#b26">[27]</ref>. • CN2 algorithm <ref type="bibr" target="#b27">[28]</ref> for generating classification rules. Its implementations can be found in Orange library <ref type="bibr" target="#b21">[22]</ref>.</p><p>• Interpretable Decision Sets (IDS) algorithm <ref type="bibr" target="#b19">[20]</ref>. The Python package pyIDS <ref type="bibr" target="#b28">[29]</ref> offers the implementation of the algorithm. • JRip algorithm <ref type="bibr" target="#b29">[30]</ref>, also called RIPPER. It is implemented in Weka package <ref type="bibr" target="#b22">[23]</ref>, and it is also available in Altair AI Studio (previously RapidMiner<ref type="foot" target="#foot_3">4</ref> ) and AIX360 package <ref type="bibr" target="#b26">[27]</ref>. • LORD <ref type="bibr" target="#b13">[14]</ref>. It is an algorithm for learning locally optimal classification rules. • M5Rules algorithm <ref type="bibr" target="#b30">[31]</ref> implemented in Weka package <ref type="bibr" target="#b22">[23]</ref> for regression problems; however, it generates a ruleset based on trees. • MLRules <ref type="bibr" target="#b15">[16]</ref> available in Weka package <ref type="bibr" target="#b22">[23]</ref> for classification problems.</p><p>• OneR <ref type="bibr" target="#b20">[21]</ref>. 
Its implementation can be found in the Weka package <ref type="bibr" target="#b22">[23]</ref>.</p><p>• RuleFit algorithm <ref type="bibr" target="#b14">[15]</ref> for generating classification rules for data with binary labels. The implementation is available in Python imodels library <ref type="bibr" target="#b31">[32]</ref>. It can also be used for regression datasets.</p><p>• RuleKit <ref type="bibr" target="#b0">[1]</ref>. It offers the induction of classification, regression and survival rules. Its implementation is available in the GitHub repository at https://github.com/adaa-polsl/RuleKit.</p><p>There is also available software that integrates numerous rule-based algorithms. Rseslib 3 library <ref type="bibr" target="#b24">[25]</ref> implements the AQ15 algorithm and numerous methods for generating decision rules from reducts. In the KEEL software library <ref type="bibr" target="#b32">[33]</ref>, there are available multiple rule learning algorithms for classification, e.g. AQ15, CN2, OneR, RIPPER and others. Several rule-based algorithms can also be found in the already mentioned libraries: Weka <ref type="bibr" target="#b22">[23]</ref>, imodels <ref type="bibr" target="#b31">[32]</ref>, and AIX360 <ref type="bibr" target="#b26">[27]</ref>; however, these packages are not solely focused on rule induction. There are also available frameworks for creating rule learners. The example can be SeCo Framework (documentation can be found at https://ke-tud.github.io/resources/SeCo.html, last accessed 2024/06/07).</p><p>As mentioned before, this is only a selection of available algorithms and their implementations. However, there is a visible dominance of algorithms for the induction of classification rules. There are few algorithms for the induction of regression rules, and only RuleKit offers the algorithm for generating rulesets for censored datasets. 
We can conclude that the RuleKit tool is the most versatile in terms of the data for which it can perform rule induction (classification problems with binary labels and for problems with multiple classes, regression data and survival datasets).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">Visual tools for rule induction</head><p>On the market, we can find programmes offering a graphical user interface (GUI) for performing data mining and predictions using rules. These programmes have the advantage of being accessible not only to users experienced in programming languages who can write their own programs.</p><p>Our RuleMiner system focuses on inducing classification, regression, and survival rules. Therefore, our overview of available GUI applications for creating rule-based models only covers these types of inductions.</p><p>Orange library <ref type="bibr" target="#b21">[22]</ref> offers an open-source GUI application that can be used for many data mining tasks. In this application, multiple widgets are available, e.g. for loading data, visualisations, and modelling. They can be moved and connected with each other to create the data mining process. Orange uses the CN2 algorithm to induct classification rules. It offers a simple viewer for rules with basic statistics, such as the length and quality of each rule. Similar systems are already mentioned Altair AI Studio (formerly Altair RapidMiner), KEEL <ref type="bibr" target="#b32">[33]</ref> and KNIME <ref type="bibr" target="#b33">[34]</ref>. The Altair AI studio and KEEL give the widgets to generate classification rule-based models. The KNIME system has an implemented method for fuzzy rule learners, but these rules are outside this overview's scope. KNIME also allows the creation of user-defined rules that can be used to label new datasets.</p><p>Another system is Weka <ref type="bibr" target="#b22">[23]</ref> -machine learning software that provides an interface for analysing datasets. As was mentioned in Section 2.1, Weka can generate classification and regression rules by multiple algorithms, which may be the advantage of this tool. 
After the induction process, Weka gives an overview of the generated rules and provides basic statistics of the ruleset. Weka also offers a KnowledgeFlow tool that can be used to create data mining processes.</p><p>A very simple tool is QMAK <ref type="bibr" target="#b34">[35]</ref>, which is a part of Rseslib 3, and it uses the Rseslib 3 library as the source of classification models. QMAK provides only simple functionality for generating and applying classification rules to a dataset.</p><p>The system similar in some aspects to RuleMiner is a service EasyMiner <ref type="bibr" target="#b35">[36]</ref>. It can be used to create classification models based on association rules. The user can load the dataset and modify the attributes. Then, based on the provided association rule pattern, the system creates a ruleset. In the article <ref type="bibr" target="#b36">[37]</ref>, the authors also present the functionalities for the edition of rule-based models, which distinguishes the system from other tools described before. The system also provides measures of predictive model quality.</p><p>Most of the tools discussed (Orange, KNIME, KEEL, Weka, QMAK) are not solely focused on rule induction, and because of this, they do not provide advanced functionalities for manipulating rule-based models and results. These five tools are also all desktop applications. It means that the datasets and analysis are unavailable to the user when using different devices. Only the EasyMiner system, which is focused on applying rule induction to data, provides interesting functionalities specific to rule-based models, such as adding, editing, removing rules, attribute modification or creating new rules. When applying the changes, the user can also interactively validate the model's predictive performance. EasyMiner is an online tool, so the program does not need to be configured on the user's hardware. 
The disadvantage of the EasyMiner system is the support of only association and classification rule induction.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">RuleMiner</head><p>Considering available programmes for rule induction, we propose RuleMiner -an online system for rule-based data mining. It is based on our original software for rule induction called RuleKit <ref type="bibr" target="#b0">[1]</ref>. It is acknowledged in the research field, as evidenced by numerous publications in reputable journals <ref type="bibr" target="#b3">[4,</ref><ref type="bibr" target="#b2">3]</ref>. Additionally, it is constantly developed, and new software is created based on it.</p><p>RuleMiner is an analytical tool that allows users to work with data using rules. It can be used to work with classification, regression, and survival data. RuleMiner offers the possibility to predict data using rulesets, evaluate the quality of rules and rulesets, create rules based on the user's specific requirements (user-driven rule induction), visualise results, compare rulesets with syntactic and semantic rule similarity and generate reports.</p><p>RuleMiner is a web application that does not require programming knowledge. Thanks to this, it is available to many users. Projects can be accessed from many devices, and there is no need to configure the application on the user's hardware. Due to the application's web-based nature, the imported data are stored in a centralised database. Users are obligated not to enter personal data, unanonymised sensitive data, or legally protected information into the application. However, those who wish to work with sensitive, protected, or proprietary data can still do so by applying data anonymization or pseudonymization techniques before uploading their datasets. These processes help ensure that the data is altered in a way that prevents the identification of individuals or the exposure of confidential information. It is important to note that even with these precautions, the service provider is not liable for the stored data. 
The latest versions of the terms of service and details on each user's available resources under different plans can be accessed on the following website: https://ruleminer.ai/.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.">RuleKit algorithms and implementations</head><p>RuleKit is a computational kernel of the RuleMiner application. It is implemented in Java and is opensource, available at https://github.com/adaa-polsl/RuleKit. In RuleMiner, a RuleKit python wrapper was used (available at https://github.com/adaa-polsl/RuleKit-python). RuleKit is suitable for classification, regression, and survival problems. However, different software was also created based on it. RuleKit and RuleKit-related software enable rule induction, rule filtering, user-driven rule induction, contrast set mining, action rule mining, induction of rules with complex and M-of-N conditions, and evaluation of elementary condition importance. The full list of software and publications related to RuleKit and RuleMiner can be found on our GitHub repository (https://github.com/ruleminer/ruleminer) and RuleMiner website (https://ruleminer.ai/publications/). The article discusses the first version of RuleMiner, which does not yet implement all the functionality available in RuleKit-related software. Our ambition, however, is that the RuleMiner application will provide more possibilities for using RuleKit-related algorithms in the future.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.1.">Comparing RuleKit with other algorithms</head><p>Most of the available data mining and rule induction software, described in section 2.1, offers rule induction for classification datasets. Because of this, we compared RuleKit with other methods in the context of classification rule induction.</p><p>For the RuleKit comparison, we have chosen well-known separate-and-conquer methods (AQ15 <ref type="bibr" target="#b24">[25]</ref>, CN2 <ref type="bibr" target="#b27">[28]</ref>, JRip <ref type="bibr" target="#b29">[30]</ref>) but also methods representing different approaches to rule induction (MLRules <ref type="bibr" target="#b15">[16]</ref>, RuleFit <ref type="bibr" target="#b14">[15]</ref>, LORD <ref type="bibr" target="#b13">[14]</ref>, BRCG <ref type="bibr" target="#b18">[19]</ref>, IDS <ref type="bibr" target="#b19">[20]</ref>, OneR <ref type="bibr" target="#b20">[21]</ref>), all with available implementations. The algorithms were mostly run with their default parameters. CN2 algorithm, which is similar to RuleKit, was run with two versions of parameters: default parameters (denoted below as CN2_default) and parameters that match the default RuleKit parameters (denoted as CN2_v2). RuleKit was launched with default parameters and with two measures -C2 (denoted as RuleKit_C2) and Correlation (RuleKit_Corr). The experiments were performed based on 10-fold cross-validations on 50 smaller datasets (most of them come from the UCI and OpenML repositories) and 9 datasets that we classified as bigger datasets (up to 300,000 rows in a dataset). The analyses were performed separately for binary and multi-label classification datasets and separately for smaller and bigger datasets. 
As the comparison metrics, we have chosen classification accuracy (denoted as ACC) on test and train datasets, balanced accuracy (BAcc) on test and train datasets, number of rules, number of conditions and computation time.</p><p>This article presents only the results of selected metrics due to the limited article space. However, the extensive experiments report is available on our GitHub page <ref type="foot" target="#foot_4">5</ref> . The report also provides detailed information about the implementations used, the parameters of each algorithm and information about available numerical results in CSV files and the datasets on which the analyses were performed. The results presented are from the current report as of June 7, 2024. However, we intend to update the report with additional results from other algorithms in the future. We are also open to conducting experiments using other implementations of rule-based methods, which can be submitted to us.</p><p>The obtained results are presented using Critical Difference (CD) diagrams <ref type="bibr" target="#b37">[38]</ref>. In the article, we present CD diagrams (see Fig. <ref type="figure" target="#fig_1">1</ref>) for ACC calculated on test datasets for smaller binary and multi-label datasets, BAcc calculated on test datasets for bigger binary and multi-label datasets, the computational time for smaller multi-label datasets and computational time for bigger binary datasets. The LORD algorithm was not included in the balanced accuracy rankings because the implementation did not output BAcc metric results.</p><p>The experiments showed that RuleKit is a good base solution for our RuleMiner platform. It performs well, especially against other coverage algorithms. The basic form of the method can generate good rule classifiers, and the created models have good predictive abilities. 
The flaw of the RuleKit basic version is the computation time, which puts it in the middle of the compared algorithms but ahead of standard coverage algorithms. To reduce computational time, the number of generated rules could be limited -RuleKit and RuleMiner can adjust the number of generated rules (also presented in the extensive report). We also conclude that RuleKit is a good computation kernel of the RuleMiner platform because it is the only algorithm that has the ability to generate survival rules.</p><p>MLRules and LORD are RuleKit's biggest competitors -MLRules because of its prediction accuracy and LORD because of its computation time. Considering this, we plan to add to RuleMiner the possibility  of including rules generated by these algorithms and implement the classification mechanism that these algorithms use. In our opinion, the LORD algorithm would also benefit from using a filtration approach.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2.">Service functionalities</head><p>RuleMiner application offers many functionalities for data mining using rule induction, and it was designed to be user-friendly. The user can create projects, load and analyse datasets and induce rules for classification, regression and survival data. It not only offers the functionalities to run rule induction algorithms and explore the results (e.g., view the created rulesets, view characteristics of single rules and a whole ruleset), but it also allows the user to connect different rulesets, filter rules, manually edit them, and combine rules that were created in different ways (generated automatically, created manually, and induced using a user-driven approach). Thanks to this, the user has a wide range of functionalities to create the rule-based model that describes a dataset well, has good predictive capabilities, and includes rules relevant from the user's point of view. The changes can be verified on an ongoing basis, and their effect on the quality of the ruleset and the predictive capabilities can be monitored. The main functionalities groups available in the RuleMiner application are presented in Fig. <ref type="figure" target="#fig_2">2</ref>, and the full description of functionalities is available in the documentation at our GitHub Wiki page (https://github.com/ruleminer/ruleminer/wiki).</p><p>The RuleMiner system offers functionality for generating reports, as it is shown in Fig. <ref type="figure" target="#fig_2">2</ref>. This is a non-interactive part of the system, but it can be used to compare the results of different machine learning methods, not only rule-based ones. What is also worth noting is that RuleMiner offers a simple expert system that automatically configures the parameters of the rule induction algorithm based on the user's answers to defined questions. 
This allows the user to adjust the induction process to their  needs without exploring algorithm parameters. One of the questionnaire's windows is shown in Fig. <ref type="figure" target="#fig_3">3</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3.">Illustrative example</head><p>The illustrative example is based on a classification problem and a publicly available dataset that describes patients potentially infected with SARS-CoV-2 <ref type="bibr" target="#b38">[39]</ref>. The goal was to diagnose patients based on questionnaires they filled out in the hospital. In this example, we are not focusing on analyzing the dataset but on demonstrating some of RuleMiner's functions based on this dataset.</p><p>Fig. <ref type="figure" target="#fig_4">4</ref> shows the RuleMiner main window with a generated ruleset sorted by three criteria. Users can analyse, sort and label the rules, evaluate the prediction quality and see the importance of attributes and conditions based on the generated ruleset <ref type="bibr" target="#b2">[3]</ref> (Fig. <ref type="figure" target="#fig_5">5</ref> presents condition importance for one class).  Then, it is possible to analyse the quality of predictions calculated on the train and test dataset (see Fig. <ref type="figure" target="#fig_6">6</ref>).   Finally, users can visualise how the rules from the ruleset are connected with each other, particularly how the elementary conditions are linked. This is demonstrated in Fig. <ref type="figure" target="#fig_9">8</ref>. It's worth noting that when users select different conditions -not necessarily from the same rule -the application provides information about the hypothetical rule created in this manner (shown on the right side of Fig. <ref type="figure" target="#fig_9">8</ref>).</p><p>The presented RulMiner windows represent only a small portion of the functionalities available in the RuleMiner application. However, they demonstrate the capabilities of the proposed system and its usability for analysing datasets using rulesets. Other available functionalities are presented in Fig. <ref type="figure" target="#fig_2">2</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Conclusions and future work</head><p>We propose a RuleMiner web application for data analysis using rules. This system is a development of the RuleKit software, which has proven to be effective enough compared to other available solutions, as demonstrated by experimental results and a comprehensive report <ref type="foot" target="#foot_5">6</ref> . To ensure reproducibility, we have provided numerical results and datasets for other users to utilise in their experiments. In the future, we hope to enhance the comparison report by including outcomes from other methods that authors may submit to us.</p><p>When compared with other GUI tools for rule induction, RuleMiner has several advantages. First of all, it is the only visual tool for rule induction that is designed to enhance the capabilities of rule-based data analysis for three types of problems: classification, regression, and survival. In contrast, most other applications limit their functionality to a single type of rule induction, mostly classification rules.</p><p>RuleMiner's distinct advantage lies in its flexible and user-centric approach to rule creation. Users can use automatic rule induction in two variants: with parameters set based on a survey of the user's needs or by manually fine-tuning the parameters to suit specific requirements. The user can also load a previously prepared ruleset or manually define all of the rules. What truly sets RuleMiner apart, however, is its support for user-driven rule induction -users can define, e.g., what conditions or attributes should be forbidden and which should be preferred by the algorithm. 
Thanks to this, the algorithm can incorporate the user's unique domain expertise but still enables the discovery of novel rules that may be unknown to the user.</p><p>Because the RuleMiner application focuses solely on rule-based data analysis, it offers many functionalities for ruleset analysis that are not available in software where rule induction is merely one of many functionalities. Besides standard ruleset and rules metric presentation, it also gives insights into the importance of attributes and conditions, ruleset graph-based visualizations, a visual tool for coverage analysis, and analysis of predictive capabilities, among other features.</p><p>An important advantage of RuleMiner is also the whole spectrum of functionalities for editing rulesets. Users can easily modify, add, disable, or remove rules, akin to the capabilities found in EasyMiner. Additionally, in RuleMiner, the user can combine rules from different rulesets, label rules, and filter rules by specific conditions. Importantly, all analyses available for initially generated rulesets can also be applied to any modified ruleset, offering flexibility in rule-based data analysis.</p><p>To conclude, the RuleMiner application offers not only a GUI for running RuleKit algorithms, functionalities to analyze induced rules or using created model for predictions but also gives the user the whole spectrum of tools to interact with created rules and personalise the induction process. Thanks to this, users can use their specific domain knowledge, experiment with ruleset modifications, and work with the system interactively. We believe that this high level of user engagement and customization is the significant advantage of the RuleMiner system.</p><p>The current version of the application has several useful functions that enable a complete rule-based data analysis process, but there is still room to add more. 
Right now, RuleMiner offers induction for classification, regression, and survival rules, but there are more RuleKit-based algorithms that could be included in the RuleMiner system in the future. For example, currently, there is no available induction of contrast sets or action rules that we want to implement into the GUI application. RuleMiner will be further developed both to provide the user with more tools for working with rules and data and to expand the number of available algorithms. If possible, we also want to add to the RuleMiner application import of MLRules and LORD rulesets and their classification mechanics, as these methods are the most promising in terms of the quality of generated rulesets and calculation time on large datasets. In the future, there will also be the functionality to load rulesets from any system, as long as the rules are in the specified format.</p><p>The RuleMiner application is currently available at https://app.ruleminer.ai/ and is in a stable version with ongoing development. The capacity for analysing datasets will expand in the future as computing resources increase. Any reported bugs are being fixed, and the application is undergoing continuous testing. We believe that the proposed RuleMiner application will be a valuable tool for analysing data using rules, serving a wide range of users.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head></head><label></label><figDesc>(a) ACC for smaller binary datasets. (b) ACC for smaller multi-label datasets. (c) BAcc for bigger binary datasets. (d) BAcc for bigger multi-label datasets. (e) Calc. time -smaller multi-label datasets. (f) Calc. time -bigger binary datasets.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: The Critical Difference diagrams for carried out experiments.</figDesc><graphic coords="6,72.00,296.83,221.13,100.62" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: The most important functionalities available in the RuleMiner application and presentation of the proposed usage scenario.</figDesc><graphic coords="7,72.01,90.94,451.26,617.87" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: One of the stages of automatic rule induction configuration.</figDesc><graphic coords="8,94.57,65.61,406.14,124.75" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: RuleMiner rules window.</figDesc><graphic coords="8,72.00,389.51,451.28,154.14" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: RuleMiner window with condition importance.</figDesc><graphic coords="8,139.69,592.29,315.89,112.70" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head>Figure 6 :</head><label>6</label><figDesc>Figure 6: RuleMiner window with information about predictive capabilities.</figDesc><graphic coords="9,74.26,65.60,446.77,267.86" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head>Fig. 7</head><label>7</label><figDesc>Fig. 7 presents the analysis of coverage of examples by the two selected rules. In the presented case, the option AND was selected, so it means that the RuleMiner shows only the examples that are covered by both rules. Other analyses can also be performed.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head>Figure 7 :</head><label>7</label><figDesc>Figure 7: RuleMiner coverage window.</figDesc><graphic coords="9,74.26,430.96,446.76,219.30" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_9"><head>Figure 8 :</head><label>8</label><figDesc>Figure 8: RuleMiner ruleset visualisation.</figDesc><graphic coords="10,72.00,65.61,451.28,279.14" type="bitmap" /></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">https://github.com/ruleminer/ruleminer/blob/main/reports/comparing_RuleKit_with_other_methods/classification_summary.md</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_1">https://github.com/ruleminer/ruleminer/tree/main/reports/comparing_RuleKit_with_other_methods/results</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_2">https://github.com/ruleminer/datasets/tree/main/classification</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_3">https://altair.com/altair-rapidminer, last accessed 2024/06/07</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="5" xml:id="foot_4">https://github.com/ruleminer/ruleminer/blob/main/reports/comparing_RuleKit_with_other_methods/classification_summary.md</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="6" xml:id="foot_5">https://github.com/ruleminer/ruleminer/blob/main/reports/comparing_RuleKit_with_other_methods/classification_summary.md</note>
		</body>
		<back>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">RuleKit: A comprehensive suite for rule-based learning</title>
		<author>
			<persName><forename type="first">A</forename><surname>Gudyś</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wróbel</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.knosys.2020.105480</idno>
	</analytic>
	<monogr>
		<title level="j">Knowledge-Based Systems</title>
		<imprint>
			<biblScope unit="volume">194</biblScope>
			<biblScope unit="page">105480</biblScope>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">SCARI: Separate and conquer algorithm for action rules and recommendations induction</title>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Matyszok</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wróbel</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.ins.2022.06.026</idno>
	</analytic>
	<monogr>
		<title level="j">Information Sciences</title>
		<imprint>
			<biblScope unit="volume">607</biblScope>
			<biblScope unit="page" from="849" to="868" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">RuleXAI-A package for rule-based explanations of machine learning model</title>
		<author>
			<persName><forename type="first">D</forename><surname>Macha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kozielski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wróbel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.softx.2022.101209</idno>
	</analytic>
	<monogr>
		<title level="j">SoftwareX</title>
		<imprint>
			<biblScope unit="volume">20</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">GuideR: A guided separate-and-conquer rule learning in classification, regression, and survival settings</title>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wróbel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Gudyś</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.knosys.2019.02.019</idno>
	</analytic>
	<monogr>
		<title level="j">Knowledge-Based Systems</title>
		<imprint>
			<biblScope unit="volume">173</biblScope>
			<biblScope unit="page" from="1" to="14" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Machine learning based analysis of relations between antigen expression and genetic aberrations in childhood B-cell precursor acute lymphoblastic leukaemia</title>
		<author>
			<persName><forename type="first">J</forename><surname>Kulis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wawrowski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Sędek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wróbel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Słota</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><forename type="middle">H</forename><surname>Van Der Velden</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Szczepański</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Clinical Medicine</title>
		<imprint>
			<biblScope unit="volume">11</biblScope>
			<biblScope unit="page">2281</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Anchors: High-precision model-agnostic explanations</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">T</forename><surname>Ribeiro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Singh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Guestrin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the AAAI Conference on Artificial Intelligence</title>
				<meeting>the AAAI Conference on Artificial Intelligence</meeting>
		<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="volume">32</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">On the Quasi-Minimal Solution of the Covering Problem</title>
		<author>
			<persName><forename type="first">R</forename><surname>Michalski</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the V. International Symposium on Information Processing (FCIP)</title>
				<meeting>the V. International Symposium on Information Processing (FCIP)<address><addrLine>Bled, Yugoslavia</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1969">1969</date>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page" from="123" to="125" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Separate-and-Conquer Rule Learning</title>
		<author>
			<persName><forename type="first">J</forename><surname>Fürnkranz</surname></persName>
		</author>
		<idno type="DOI">10.1023/A:1006524209794</idno>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence Review</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="page" from="3" to="54" />
			<date type="published" when="1999">1999</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<monogr>
		<title level="m" type="main">Foundations of Rule Learning</title>
		<author>
			<persName><forename type="first">J</forename><surname>Fürnkranz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Gamberger</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Lavrač</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2012">2012</date>
			<publisher>Springer Science &amp; Business Media</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Heuristic rule-based regression via dynamic reduction to classification</title>
		<author>
			<persName><forename type="first">F</forename><surname>Janssen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Fürnkranz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Twenty-Second International Joint Conference on Artificial Intelligence</title>
				<imprint>
			<publisher>Citeseer</publisher>
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Learning rule sets from survival data</title>
		<author>
			<persName><forename type="first">Ł</forename><surname>Wróbel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Gudyś</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
		<idno type="DOI">10.1186/s12859-017-1693-x</idno>
	</analytic>
	<monogr>
		<title level="j">BMC Bioinformatics</title>
		<imprint>
			<biblScope unit="volume">18</biblScope>
			<biblScope unit="page">285</biblScope>
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Sequential covering rule induction algorithm for variable consistency rough set approaches</title>
		<author>
			<persName><forename type="first">J</forename><surname>Błaszczyński</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Słowiński</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Szeląg</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.ins.2010.10.030</idno>
	</analytic>
	<monogr>
		<title level="j">Information Sciences</title>
		<imprint>
			<biblScope unit="volume">181</biblScope>
			<biblScope unit="page" from="987" to="1002" />
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">On mining instance-centric classification rules</title>
		<author>
			<persName><forename type="first">J</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Karypis</surname></persName>
		</author>
		<idno type="DOI">10.1109/TKDE.2006.179</idno>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Knowledge and Data Engineering</title>
		<imprint>
			<biblScope unit="volume">18</biblScope>
			<biblScope unit="page" from="1497" to="1511" />
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Efficient learning of large sets of locally optimal classification rules</title>
		<author>
			<persName><forename type="first">V</forename><forename type="middle">Q P</forename><surname>Huynh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Fürnkranz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Beck</surname></persName>
		</author>
		<idno type="DOI">10.1007/s10994-022-06290-w</idno>
	</analytic>
	<monogr>
		<title level="j">Machine Learning</title>
		<imprint>
			<biblScope unit="volume">112</biblScope>
			<biblScope unit="page" from="571" to="610" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Predictive Learning via Rule Ensembles</title>
		<author>
			<persName><forename type="first">J</forename><surname>Friedman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Popescu</surname></persName>
		</author>
		<idno type="DOI">10.1214/07-AOAS148</idno>
	</analytic>
	<monogr>
		<title level="j">The Annals of Applied Statistics</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Maximum likelihood rule ensembles</title>
		<author>
			<persName><forename type="first">K</forename><surname>Dembczyński</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Kotłowski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Słowiński</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 25th International Conference on Machine Learning -ICML &apos;08</title>
				<meeting>the 25th International Conference on Machine Learning -ICML &apos;08<address><addrLine>Helsinki, Finland</addrLine></address></meeting>
		<imprint>
			<publisher>ACM Press</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="224" to="231" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">ENDER: A statistical framework for boosting decision rules</title>
		<author>
			<persName><forename type="first">K</forename><surname>Dembczyński</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Kotłowski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Słowiński</surname></persName>
		</author>
		<idno type="DOI">10.1007/s10618-010-0177-7</idno>
	</analytic>
	<monogr>
		<title level="j">Data Mining and Knowledge Discovery</title>
		<imprint>
			<biblScope unit="volume">21</biblScope>
			<biblScope unit="page" from="52" to="90" />
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">The bagging and n²-classifiers based on rules induced by MODLEM</title>
		<author>
			<persName><forename type="first">J</forename><surname>Stefanowski</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the International Conference on Rough Sets and Current Trends in Computing</title>
				<meeting>the International Conference on Rough Sets and Current Trends in Computing<address><addrLine>Uppsala, Sweden</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="488" to="497" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Boolean Decision Rules via Column Generation</title>
		<author>
			<persName><forename type="first">S</forename><surname>Dash</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Gunluk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Wei</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Advances in Neural Information Processing Systems</title>
				<imprint>
			<publisher>Curran Associates, Inc</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="volume">31</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Interpretable decision sets: A joint framework for description and prediction</title>
		<author>
			<persName><forename type="first">H</forename><surname>Lakkaraju</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">H</forename><surname>Bach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Leskovec</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</title>
				<meeting>the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2016">2016</date>
			<biblScope unit="page" from="1675" to="1684" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Very Simple Classification Rules Perform Well on Most Commonly Used Datasets</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">C</forename><surname>Holte</surname></persName>
		</author>
		<idno type="DOI">10.1023/A:1022631118932</idno>
	</analytic>
	<monogr>
		<title level="j">Machine Learning</title>
		<imprint>
			<biblScope unit="volume">11</biblScope>
			<biblScope unit="page" from="63" to="90" />
			<date type="published" when="1993">1993</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Orange: Data mining toolbox in python</title>
		<author>
			<persName><forename type="first">J</forename><surname>Demšar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Curk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Erjavec</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Č</forename><surname>Gorup</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Hočevar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Milutinovič</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Možina</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Polajnar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Toplak</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Starič</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Štajdohar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Umek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Žagar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Žbontar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Žitnik</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Zupan</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">The Journal of Machine Learning Research</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="page" from="2349" to="2353" />
			<date type="published" when="2013">2013</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<monogr>
		<title level="m" type="main">The WEKA Workbench. Online Appendix for &quot;Data Mining: Practical Machine Learning Tools and Techniques</title>
		<author>
			<persName><forename type="first">E</forename><surname>Frank</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">A</forename><surname>Hall</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">H</forename><surname>Witten</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2016">2016</date>
			<publisher>Morgan Kaufmann</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">Arules -A computational environment for mining association rules and frequent item sets</title>
		<author>
			<persName><forename type="first">M</forename><surname>Hahsler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Gruen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Hornik</surname></persName>
		</author>
		<idno type="DOI">10.18637/jss.v014.i15</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Statistical Software</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="page" from="1" to="25" />
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Rseslib 3: Open Source Library of Rough Set and Machine Learning Methods</title>
		<author>
			<persName><forename type="first">A</forename><surname>Wojna</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Latkowski</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-319-99368-3_13</idno>
	</analytic>
	<monogr>
		<title level="m">Rough Sets</title>
				<meeting><address><addrLine>Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer International Publishing</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="162" to="176" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Learning Gradient Boosted Multi-label Classification Rules</title>
		<author>
			<persName><forename type="first">M</forename><surname>Rapp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">L</forename><surname>Mencía</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Fürnkranz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V.-L</forename><surname>Nguyen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Hüllermeier</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-030-67664-3_8</idno>
	</analytic>
	<monogr>
		<title level="m">Machine Learning and Knowledge Discovery in Databases -European Conference</title>
				<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="124" to="140" />
		</imprint>
	</monogr>
	<note>ECML PKDD 2020, Proceedings</note>
</biblStruct>

<biblStruct xml:id="b26">
	<monogr>
		<title level="m" type="main">One Explanation Does Not Fit All: A Toolkit and Taxonomy of AI Explainability Techniques</title>
		<author>
			<persName><forename type="first">V</forename><surname>Arya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">K E</forename><surname>Bellamy</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P.-Y</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Dhurandhar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hind</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">C</forename><surname>Hoffman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Houde</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Q</forename><forename type="middle">V</forename><surname>Liao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Luss</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Mojsilović</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Mourad</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Pedemonte</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Raghavendra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Richards</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Sattigeri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Shanmugam</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Singh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">R</forename><surname>Varshney</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Wei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="DOI">10.48550/arXiv.1909.03012</idno>
		<idno type="arXiv">arXiv:1909.03012</idno>
		<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">The CN2 Induction Algorithm</title>
		<author>
			<persName><forename type="first">P</forename><surname>Clark</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Niblett</surname></persName>
		</author>
		<idno type="DOI">10.1023/A:1022641700528</idno>
	</analytic>
	<monogr>
		<title level="j">Machine Learning</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page" from="261" to="283" />
			<date type="published" when="1989">1989</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<monogr>
		<title level="m" type="main">PyIDS - Python Implementation of Interpretable Decision Sets Algorithm by Lakkaraju et al</title>
		<author>
			<persName><forename type="first">J</forename><surname>Filip</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Kliegr</surname></persName>
		</author>
		<imprint>
			<!-- NOTE(review): extracted date was the garbled "2016. 2019"; the PyIDS paper appeared at RuleML+RR 2019 -->
			<date type="published" when="2019">2019</date>
			<publisher>RuleML+RR</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">Fast effective rule induction</title>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">W</forename><surname>Cohen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Twelfth International Conference on Machine Learning</title>
				<imprint>
			<publisher>Morgan Kaufmann</publisher>
			<date type="published" when="1995">1995</date>
			<biblScope unit="page" from="115" to="123" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">Generating rule sets from model trees</title>
		<author>
			<persName><forename type="first">G</forename><surname>Holmes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hall</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Frank</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Twelfth Australian Joint Conference on Artificial Intelligence</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1999">1999</date>
			<biblScope unit="page" from="1" to="12" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">imodels: A python package for fitting interpretable models</title>
		<author>
			<persName><forename type="first">C</forename><surname>Singh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Nasseri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><forename type="middle">S</forename><surname>Tan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Tang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Yu</surname></persName>
		</author>
		<idno type="DOI">10.21105/joss.03192</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Open Source Software</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page">3192</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<analytic>
		<title level="a" type="main">KEEL: A software tool to assess evolutionary algorithms for data mining problems</title>
		<author>
			<persName><forename type="first">J</forename><surname>Alcalá-Fdez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Sánchez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>García</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J</forename><surname>Del Jesus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Ventura</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Garrell</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Otero</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Romero</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Bacardit</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><forename type="middle">M</forename><surname>Rivas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">C</forename><surname>Fernández</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Herrera</surname></persName>
		</author>
		<idno type="DOI">10.1007/s00500-008-0323-y</idno>
	</analytic>
	<monogr>
		<title level="j">Soft Computing</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="page" from="307" to="318" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<monogr>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">R</forename><surname>Berthold</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Cebron</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Dill</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">R</forename><surname>Gabriel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Kötter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Meinl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Ohl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Sieb</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Thiel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Wiswedel</surname></persName>
		</author>
		<title level="m">KNIME: The Konstanz Information Miner, in: Data Analysis, Machine Learning and Applications</title>
				<meeting><address><addrLine>Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="319" to="326" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">QMAK: Interacting with Machine Learning Models and Visualizing Classification Process</title>
		<author>
			<persName><forename type="first">A</forename><surname>Wojna</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Jachim</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Kosson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ł</forename><surname>Kowalski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Mański</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mański</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Mroczek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Niemkiewicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Piszczatowski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Próchniak</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Romańczuk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Skibiński</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Staszczyk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Szostakiewicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Tur</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Wójcik</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Zuchniak</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">18th Conference on Computer Science and Intelligence Systems</title>
				<imprint>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="315" to="318" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<analytic>
		<title level="a" type="main">EasyMiner.eu: Web framework for interpretable machine learning based on rules and frequent itemsets</title>
		<author>
			<persName><forename type="first">S</forename><surname>Vojíř</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Zeman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Kuchař</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Kliegr</surname></persName>
		</author>
		<author>
			<!-- NOTE(review): "Easyminer" is a GROBID extraction artifact — it is the leading token of the
			     article title ("EasyMiner.eu"), not a person; flagged rather than removed to preserve data -->
			<persName><surname>Easyminer</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.knosys.2018.03.006</idno>
	</analytic>
	<monogr>
		<title level="j">Knowledge-Based Systems</title>
		<imprint>
			<biblScope unit="volume">150</biblScope>
			<biblScope unit="page" from="111" to="115" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">Editable machine learning models? A rule-based framework for user studies of explainability</title>
		<author>
			<persName><forename type="first">S</forename><surname>Vojíř</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Kliegr</surname></persName>
		</author>
		<idno type="DOI">10.1007/s11634-020-00419-2</idno>
	</analytic>
	<monogr>
		<title level="j">Advances in Data Analysis and Classification</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="page" from="785" to="799" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b37">
	<analytic>
		<title level="a" type="main">Statistical comparisons of classifiers over multiple data sets</title>
		<author>
			<persName><forename type="first">J</forename><surname>Demšar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Machine Learning Research</title>
		<imprint>
			<biblScope unit="volume">7</biblScope>
			<biblScope unit="page" from="1" to="30" />
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<analytic>
		<title level="a" type="main">Symptom-based early-stage differentiation between SARS-CoV-2 versus other respiratory tract infections—Upper Silesia pilot study</title>
		<author>
			<persName><forename type="first">J</forename><surname>Mika</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Tobiasz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Zyla</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Papiez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Bach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Werner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kozielski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kania</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Gruca</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Piotrowski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Sobala-Szczygieł</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Włostowska</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Foszner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Sikora</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Polanska</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Jaroszewicz</surname></persName>
		</author>
		<idno type="DOI">10.1038/s41598-021-93046-6</idno>
	</analytic>
	<monogr>
		<title level="j">Scientific Reports</title>
		<imprint>
			<biblScope unit="volume">11</biblScope>
			<biblScope unit="page">13580</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
