<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Probabilistic Ontology and Knowledge Fusion for Procurement Fraud Detection in Brazil</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Rommel</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
							<email>rommel.carvalho@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Kathryn</forename><forename type="middle">Blackmond</forename><surname>Laskey</surname></persName>
							<email>klaskey@gmu.edu</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Paulo</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
							<email>pcosta@gmu.edu</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Marcelo</forename><surname>Ladeira</surname></persName>
							<email>mladeira@unb.br</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Brasilia Campus Universitário Darcy Ribeiro</orgName>
								<address>
									<postCode>DF, 70910-900</postCode>
									<settlement>Brasilia</settlement>
									<country key="BR">Brazil</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department" key="dep1">Dipartimento di Elettronica</orgName>
								<orgName type="department" key="dep2">Informatica e Sistemistica</orgName>
								<orgName type="institution">Università della Calabria</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff10">
								<orgName type="laboratory">Intelligent Systems Lab</orgName>
								<orgName type="institution">BT</orgName>
								<address>
									<addrLine>Adastral Park</addrLine>
									<postCode>IP5 3RE</postCode>
									<settlement>Ipswich</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Laécio</forename><forename type="middle">L</forename><surname>Santos</surname></persName>
							<affiliation key="aff1">
								<orgName type="institution">University of Brasilia Campus Universitário Darcy Ribeiro</orgName>
								<address>
									<postCode>DF, 70910-900</postCode>
									<settlement>Brasilia</settlement>
									<country key="BR">Brazil</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department" key="dep1">Dipartimento di Elettronica</orgName>
								<orgName type="department" key="dep2">Informatica e Sistemistica</orgName>
								<orgName type="institution">Università della Calabria</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff10">
								<orgName type="laboratory">Intelligent Systems Lab</orgName>
								<orgName type="institution">BT</orgName>
								<address>
									<addrLine>Adastral Park</addrLine>
									<postCode>IP5 3RE</postCode>
									<settlement>Ipswich</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Shou</forename><surname>Matsumoto</surname></persName>
							<affiliation key="aff1">
								<orgName type="institution">University of Brasilia Campus Universitário Darcy Ribeiro</orgName>
								<address>
									<postCode>DF, 70910-900</postCode>
									<settlement>Brasilia</settlement>
									<country key="BR">Brazil</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department" key="dep1">Dipartimento di Elettronica</orgName>
								<orgName type="department" key="dep2">Informatica e Sistemistica</orgName>
								<orgName type="institution">Università della Calabria</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff10">
								<orgName type="laboratory">Intelligent Systems Lab</orgName>
								<orgName type="institution">BT</orgName>
								<address>
									<addrLine>Adastral Park</addrLine>
									<postCode>IP5 3RE</postCode>
									<settlement>Ipswich</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Claudia</forename><surname>D'amato</surname></persName>
							<email>claudia.damato@di.uniba.it</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Nicola</forename><surname>Fanizzi</surname></persName>
							<email>fanizzi@di.uniba.it</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Bettina</forename><surname>Fazzinga</surname></persName>
							<email>bfazzinga@deis.unical.it</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Brasilia Campus Universitário Darcy Ribeiro</orgName>
								<address>
									<postCode>DF, 70910-900</postCode>
									<settlement>Brasilia</settlement>
									<country key="BR">Brazil</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department" key="dep1">Dipartimento di Elettronica</orgName>
								<orgName type="department" key="dep2">Informatica e Sistemistica</orgName>
								<orgName type="institution">Università della Calabria</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff10">
								<orgName type="laboratory">Intelligent Systems Lab</orgName>
								<orgName type="institution">BT</orgName>
								<address>
									<addrLine>Adastral Park</addrLine>
									<postCode>IP5 3RE</postCode>
									<settlement>Ipswich</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Georg</forename><surname>Gottlob</surname></persName>
							<email>georg.gottlob@comlab.ox.ac.uk</email>
							<affiliation key="aff4">
								<orgName type="laboratory">Computing Laboratory</orgName>
								<orgName type="institution">University of Oxford</orgName>
								<address>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff5">
								<orgName type="department">Oxford-Man Institute of Quantitative Finance</orgName>
								<orgName type="institution">University of Oxford</orgName>
								<address>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff11">
								<orgName type="department">School of Computer Science and Engineering</orgName>
								<orgName type="institution">BeiHang University</orgName>
								<address>
									<settlement>Beijing</settlement>
									<country key="CN">China</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Thomas</forename><surname>Lukasiewicz</surname></persName>
							<email>thomas.lukasiewicz@comlab.ox.ac.uk</email>
							<affiliation key="aff4">
								<orgName type="laboratory">Computing Laboratory</orgName>
								<orgName type="institution">University of Oxford</orgName>
								<address>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff6">
								<orgName type="department">Institut für Informationssysteme</orgName>
								<orgName type="institution">TU Wien</orgName>
								<address>
									<country key="AT">Austria</country>
								</address>
							</affiliation>
							<affiliation key="aff11">
								<orgName type="department">School of Computer Science and Engineering</orgName>
								<orgName type="institution">BeiHang University</orgName>
								<address>
									<settlement>Beijing</settlement>
									<country key="CN">China</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">'</forename><surname>Amato</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Floriana</forename><surname>Esposito</surname></persName>
							<email>esposito@di.uniba.it</email>
						</author>
						<author>
							<persName><forename type="first">Trevor</forename><surname>Martin</surname></persName>
							<email>trevor.martin@bristol.ac.uk</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Brasilia Campus Universitário Darcy Ribeiro</orgName>
								<address>
									<postCode>DF, 70910-900</postCode>
									<settlement>Brasilia</settlement>
									<country key="BR">Brazil</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department" key="dep1">Dipartimento di Elettronica</orgName>
								<orgName type="department" key="dep2">Informatica e Sistemistica</orgName>
								<orgName type="institution">Università della Calabria</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff10">
								<orgName type="laboratory">Intelligent Systems Lab</orgName>
								<orgName type="institution">BT</orgName>
								<address>
									<addrLine>Adastral Park</addrLine>
									<postCode>IP5 3RE</postCode>
									<settlement>Ipswich</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Zheng</forename><surname>Siyao</surname></persName>
							<email>zhengsyao@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="laboratory">Computing Laboratory</orgName>
								<orgName type="institution">University of Oxford</orgName>
								<address>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff11">
								<orgName type="department">School of Computer Science and Engineering</orgName>
								<orgName type="institution">BeiHang University</orgName>
								<address>
									<settlement>Beijing</settlement>
									<country key="CN">China</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Andrei</forename><surname>Majidian</surname></persName>
							<email>andrei.majidian@bt.com</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Brasilia Campus Universitário Darcy Ribeiro</orgName>
								<address>
									<postCode>DF, 70910-900</postCode>
									<settlement>Brasilia</settlement>
									<country key="BR">Brazil</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="department" key="dep1">Dipartimento di Elettronica</orgName>
								<orgName type="department" key="dep2">Informatica e Sistemistica</orgName>
								<orgName type="institution">Università della Calabria</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff10">
								<orgName type="laboratory">Intelligent Systems Lab</orgName>
								<orgName type="institution">BT</orgName>
								<address>
									<addrLine>Adastral Park</addrLine>
									<postCode>IP5 3RE</postCode>
									<settlement>Ipswich</settlement>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Dave</forename><surname>Reynolds</surname></persName>
							<email>dave.e.reynolds@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">George Mason University</orgName>
								<address>
									<addrLine>4400 University Drive</addrLine>
									<postCode>22030-4400</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli Studi di Bari</orgName>
								<address>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
							<affiliation key="aff9">
								<orgName type="department">AI Group</orgName>
								<orgName type="institution">University of Bristol</orgName>
								<address>
									<postCode>BS8 1TR</postCode>
									<country key="GB">UK</country>
								</address>
							</affiliation>
							<affiliation key="aff12">
								<orgName type="institution">Hewlett Packard Laboratories</orgName>
								<address>
									<settlement>Bristol</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff7">
								<orgName type="department">Dipartimento di Informatica</orgName>
								<orgName type="institution">Università degli studi di Bari Campus Universitario</orgName>
								<address>
									<addrLine>Via Orabona 4</addrLine>
									<postCode>70125</postCode>
									<settlement>Bari</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff8">
								<orgName type="department">Department of Systems Engineering and Operations Research</orgName>
								<orgName type="institution">George Mason University</orgName>
								<address>
									<postCode>22030</postCode>
									<settlement>Fairfax</settlement>
									<region>VA</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Probabilistic Ontology and Knowledge Fusion for Procurement Fraud Detection in Brazil</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">1C6605962F0D54CB254E469D4D5C89F6</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-24T12:53+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Probabilistic Ontology, PR-OWL, Ontology, Procurement Fraud Detection, Knowledge Fusion, MEBN, UnBBayes First-Order Logic, Probability fuzzy taxonomy, creative knowledge discovery, fuzzy association rules, uncertainty in semantic web Uncertainty reasoning</term>
					<term>linked open data</term>
					<term>semantic web</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>It contains 6 technical papers and 3 position papers, which were selected in a rigorous reviewing process, where each paper was reviewed by at least four program committee members.</p><p>The International Semantic Web Conference is a major international forum for presenting visionary research on all aspects of the Semantic Web. The International Workshop on Uncertainty Reasoning for the Semantic Web is an exciting opportunity for collaboration and cross-fertilization between the uncertainty reasoning community and the Semantic Web community. Effective methods for reasoning under uncertainty are vital for realizing many aspects of the Semantic Web vision, but the ability of current-generation Web technology to handle uncertainty is extremely limited. Recently, there has been a groundswell of demand for uncertainty reasoning technology among Semantic Web researchers and developers. This surge of interest creates a unique opening to bring together two communities with a clear commonality of interest but little history of interaction. By capitalizing on this opportunity, URSW could spark dramatic progress toward realizing the Semantic Web vision.</p><p>Audience: The intended audience for this workshop includes the following: (1) researchers in uncertainty reasoning technologies with interest in Semantic Web and Webrelated technologies; (2) Semantic Web developers and researchers; (3) people in the knowledge representation community with interest in the Semantic Web; (4) ontology researchers and ontological engineers; (5) Web services researchers and developers with interest in the Semantic Web; and (6) developers of tools designed to support Semantic Web implementation, e.g., Jena, Protégé, and Protégé-OWL developers.</p><p>Topics: We intended to have an open discussion on any topic relevant to the general subject of uncertainty in the Semantic Web (including fuzzy theory, probability theory, and other approaches). 
Therefore, the following list should be just an initial guide:</p><p>(1) syntax and semantics for extensions to Semantic Web languages to enable representation of uncertainty; (2) logical formalisms to support uncertainty in Semantic Web languages; (3) probability theory as a means of assessing the likelihood that terms in different ontologies refer to the same or similar concepts; (4) architectures for applying plausible reasoning to the problem of ontology mapping; (5) using fuzzy approaches to deal with imprecise concepts within ontologies; (6) the concept of a probabilistic ontology and its relevance to the Semantic Web; (7) best practices for representing uncertain, incomplete, ambiguous, or controversial information in the Semantic Web; (8) the role of uncertainty as it relates to Web services; (9) interface protocols with support for uncertainty as a means to improve interoperability among Web services; (10) uncertainty reasoning techniques applied to trust issues in the Semantic Web; (11) existing implementations of uncertainty reasoning tools in the context of the Semantic Web;</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>A primary responsibility of the Brazilian Office of the Comptroller General (CGU) is to prevent and detect government corruption. To carry out this mission, CGU must gather information from a variety of sources and combine it to evaluate whether further action, such as an investigation, is required. One of the most difficult challenges is the information explosion. Auditors must fuse vast quantities of information from a variety of sources in a way that highlights its relevance to decision makers and helps them focus their efforts on the most critical cases. This is no trivial duty. The Growing Acceleration Program (PAC) alone has a budget greater than 250 billion dollars with more than one thousand projects only on the state of Sao Paulo (http://www.brasil.gov.br/pac/). All of these have to be audited and inspected by CGU -and, in spite having only three thousand employees. Therefore, CGU must optimize its processes in order to carry out its mission.</p><p>The Semantic Web (SW), like the document web that preceded it, is based on radical notions of information sharing. These ideas <ref type="bibr">[1]</ref> include: (i) the Anyone can say Anything about Any topic (AAA) slogan; (ii) the open world assumption, in which we assume there is always more information that could be known, and (iii) nonunique naming, which appreciates the reality that different speakers on the Web might use different names to define the same entity. In a fundamental departure from assumptions of traditional information systems architectures, the Semantic Web is intended to provide an environment in which information sharing can thrive and a network effect of knowledge synergy is possible. 
But this style of information gathering can generate a chaotic landscape rife with confusion, disagreement and conflict.</p><p>We call an environment characterized by the above assumptions a Radical Information Sharing (RIS) environment. The challenge facing SW architects is therefore to avoid the natural chaos to which RIS environments are prone, and move to a state characterized by information sharing, cooperation and collaboration. According to <ref type="bibr">[1]</ref>, one solution to this challenge lies in modeling, and this is where ontology languages like the Web Ontology Language (OWL) come in.</p><p>As will be shown in Section 3, the domain of procurement fraud detection is a RIS environment. However, uncertainty is ubiquitous in knowledge fusion. Uncertainty is especially important to applications such as fraud detection, in which perpetrators seek to conceal illicit intentions and activities, making crisp assertions extremely hard and rare. In such environments, partial (not complete) or approximate (not exact) information is more the rule than the exception.</p><p>Bayesian networks (BNs) have been widely applied to drawing inferences in information and knowledge fusion in the presence of uncertainty. However, according to <ref type="bibr">[2]</ref> BNs are not expressive enough for many real-world applications. More specifically, BNs assume a simple attribute-value representation -that is, each problem instance involves reasoning about the same fixed number of attributes, with only the evidence values changing from problem instance to problem instance. Complex problems on the scale of the semantic web often involve intricate relationships among many variables, and the limited representational power of BNs is insufficient for building useful, detailed models.</p><p>Multi-Entity Bayesian Network (MEBN) logic can represent and reason with uncertainty about any propositions that can be expressed in first-order logic <ref type="bibr">[3]</ref>. 
Probabilistic OWL (PR-OWL) uses MEBN's strengths to provide a framework for building probabilistic ontologies (PO), a major step towards semantically aware, probabilistic knowledge fusion systems <ref type="bibr">[4]</ref>. This paper uses PR-OWL to design and test a model for fusing information to detect possible frauds in procurements involving Federal funds.</p><p>The paper is organized as follows. Section 2 introduces Multi-Entity Bayesian Networks (MEBN), an expressive Bayesian logic, and PR-OWL, an extension of the OWL language that can represent probabilistic ontologies having MEBN as its underlying logic. Section 3 presents a case study from CGU to demonstrate the power of PR-OWL ontologies for knowledge representation and fusion. Finally, Section 4 presents some concluding remarks.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">MEBN and PR-OWL</head><p>Multi-Entity Bayesian Networks (MEBN) <ref type="bibr">[5 and 6]</ref> extend BNs (BN) to achieve firstorder expressive power. MEBN represents knowledge as a collection of MEBN Fragments (MFrags), which are organized into MEBN Theories (MTheories).</p><p>An MFrag contains random variables (RVs) and a fragment graph representing dependencies among these RVs. An MFrag is a template for a fragment of a Bayesian network. It is instantiated by binding its arguments to domain entity identifiers to create instances of its RVs. There are three kinds of RV: context, resident and input. Context RVs represent conditions that must be satisfied for the distributions represented in the MFrag to apply. Input nodes represent RVs that may influence the distributions defined in the MFrag, but whose distributions are defined in other MFrags. Distributions for resident RV instances are defined in the MFrag. Distributions for resident RVs are defined by specifying local distributions conditioned on the values of the instances of their parents in the fragment graph.</p><p>A set of MFrags represents a joint distribution over instances of its random variables. MEBN provides a compact way to represent repeated structure in a BN. An important advantage of MEBN is that there is no fixed limit on the number of RV instances, and the random variable instances are dynamically instantiated as needed.</p><p>An MTheory is a set of MFrags that satisfies conditions of consistency ensuring the existence of a unique joint probability distribution over its random variable instances.</p><p>To apply an MTheory to reason about particular scenarios, one needs to provide the system with specific information about the individual entity instances involved in the scenario. 
On receipt of this information, Bayesian inference can be used both to answer specific questions of interest (e.g., how likely is it that a particular procurement is being directed to a specific enterprise?) and to refine the MTheory (e.g., each new tactical situation includes additional statistical data about the likelihood of a given attack for that set of circumstances). Bayesian inference is used to perform both problem specific inference and learning in a sound, logically coherent manner (for more details see <ref type="bibr">[6 and 7]</ref>).</p><p>State-of-the-art systems are increasingly adopting ontologies as a means to ensure formal semantic support for knowledge sharing <ref type="bibr">[8, 9, 10, 11, 12, and 13]</ref>. Representing and reasoning with uncertainty is becoming recognized as an essential capability in many domains. A common error is to provide support for uncertainty representation by just annotating ontologies with numerical probabilities. This approach leads to brittleness, as too much information is lost due to the lack of a representational scheme that can capture structural nuances of the probabilistic information. More expressive representation formalisms are needed <ref type="bibr">[4]</ref>. Probabilistic Ontologies (PR-OWL) <ref type="bibr">[14 and 15]</ref> was proposed as a more expressive formalism for representing knowledge in domains characterized by uncertainty. Figure <ref type="figure" target="#fig_0">1</ref> presents the main concepts needed to define an MTheory in PR-OWL. In the diagram, the ellipses represent the general classes, while the arcs represent the main relationships among the classes.</p><p>The procurement fraud detection probabilistic ontology was built in UnBBayes-MEBN, a tool for building and reasoning with PR-OWL probabilistic ontologies. 
UnBBayes-MEBN was the first software to implement PR-OWL/MEBN (see <ref type="bibr">[16,</ref><ref type="bibr">17,</ref><ref type="bibr">18,</ref><ref type="bibr">19]</ref> for more details). UnBBayes-MEBN supports Multi-Entity Bayesian Network (MEBN) and enables creation and editing of Probabilistic Ontologies in PR-OWL <ref type="bibr">[18]</ref>. The MEBN/PR-OWL Graphical User Interface (GUI) <ref type="bibr">[16]</ref> allows users to define MFrags and make probabilistic queries. UnBBayes-MEBN also implements an algorithm for generating a Situation Specific Bayesian Network (SSBN) <ref type="bibr">[18,</ref><ref type="bibr">19]</ref>, which is an ordinary BN created by instantiating instances of the MFrags to respond to a probabilistic query. Once the SSBN is generated, the inference engine (Reasoning) is called to process findings and update beliefs. UnBBayes-MEBN uses the Protégé-OWL library to load and save PR-OWL files (IO) in a format compatible with OWL. It supports first order logic context node evaluation (FOL), through the use of the PowerLoom library. It also defines and implements a built-in mechanism for typing and recursion. Finally, it permits the definition of dynamic conditional probabilistic tables.</p><p>UnBBayes has proven to be a simple, yet powerful, tool for designing probabilistic ontologies and for uncertain reasoning in complex situations such as procurement fraud detection. It is straightforward to use and provides powerful features (e.g. dynamic table) not available in systems (e.g., Quiddity) previously employed to reason with PR-OWL/MEBN knowledge bases.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Procurement Fraud Detection</head><p>A major source of corruption is the procurement process. Although laws attempt to ensure a competitive and fair process, perpetrators find ways to turn the process to their advantage while appearing to be legitimate. This is why a specialist has didactically structured the different kinds of procurement frauds CGU has dealt with in past years.</p><p>These different fraud types are characterized by criteria, such as business owners who work as a front for the company, use of accounting indices that are not common practice, etc. Indicators have been established to help identify cases of each of these fraud types. For instance, one principle that must be followed in public procurement is that of competition. Every public procurement should establish minimum requisites necessary to guarantee the execution of the contract in order to maximize the number of participating bidders. Nevertheless, it is common to have a fake competition when different bidders are, in fact, owned by the same person. This is usually done by having someone as a front for the enterprise, which is often someone with little or no education.</p><p>The ultimate goal of this case study is to structure the specialist knowledge in a way that an automated system can reason with the evidence in a manner similar to the specialist. Such an automated system is intended to support specialists and to help train new specialists, but not to replace them. Initially, a few simple criteria were selected as a proof of concept. Nevertheless, it is shown that the model can be incrementally updated to incorporate new criteria. In this process, it becomes clear that a number of different sources must be consulted to come up with the necessary indicators to create new and useful knowledge for decision makers about the procurements.  
As the focus of the work is in representing the specialist knowledge and reasoning through probabilistic ontologies and not in the collection of information, the idea is that the analysts that work at CGU, already making audits and inspections, accomplish the collection of information through questionnaires that can specifically be created for the collecting of indicators for the selected criteria (Information Gathering). These questionnaires can be created using a system that is already in production at CGU. Once they are answered the necessary information is going to be available (DB -Information). Hence, UnBBayes, using the probabilistic ontology designed by experts (Design -UnBBayes), will be able to collect these millions of items of information and transform them into dozens or hundreds of items of knowledge, through logic and probabilistic inference, e.g. procurement announcements, contracts, reports, etc -a huge amount of data -are analyzed allowing the gathering of relevant relations and properties -a large amount of information -which in turn are used to draw some conclusions about possible irregularities -a smaller number of items of knowledge (Inference -Knowledge). This knowledge can be filtered so that only the procurements that show a probability higher than a threshold, e.g. 20%, are automatically forwarded to the responsible department along with the inferences about potential fraud and the supporting evidence (Report for Decision Makers).</p><p>The criteria selected by the specialist were the use of accounting indices and the demand of experience in just one contract. There are four common types of indices that are usually used as requirements in procurements (ILC, ILG, ISG, and IE). Any other type could indicate a made-up index specifically designed to direct the procurement to some specific company. The greater the numbers of uncommon accounting indices used by the procurement the more suspicious it is, i.e. 
the higher the chance of having fraud. In addition, a procurement specifies a minimum value for these accounting indices. The minimum value that is usually required is 1.0. The higher this minimum value, the more the competition is narrowed, and therefore the higher the chance the procurement is being directed to some company. The other criterion, demanding proof of experience in only one contract, is suspect because in almost every case, the experience is not gained only by a particular contract, but also by doing it over and over again in different contracts. It does not matter if you have built 1,000 ft2 of wall in just one contract or 100 ft2 in 10 different contracts. The experience gained will be basically the same.</p><p>The procurement fraud detection model was developed as a probabilistic ontology (using PR-OWL) to define its semantics and uncertain characteristics. The MTheory created for the model, using UnBBayes-MEBN, was divided into three different MFrags.</p><p>The first, Figure <ref type="figure" target="#fig_3">3</ref>, presents the criteria required from a company to participate in the procurement, containing information about the type of accounting index (ILC, ILG, ISG, IE, and Other) and the minimum value for it (between 0 and 1, between 1 and 2, between 2 and 3, and greater than 3). This MFrag also contains information about where a specific index is used (which procurement), and if the procurement demands experience in only one contract. The second, Figure <ref type="figure" target="#fig_4">4</ref>, represents whether procurement is being directed to a specific company by the use of unusual accounting indices. As explained before, this analysis is based on the type of the index and the minimum value it requires. 
This evaluation takes into consideration every index used in a specific procurement, hence it is dynamic.</p><p>The last MFrag, Figure <ref type="figure" target="#fig_5">5</ref>, represents the overall possibility that procurement is being directed to a specific company based on the result of its being directed by the use of unusual indices and by the requirement of experience in only one contract, as explained before. To test the model, two scenarios, which represent the two groups of suspect and non suspect procurements, were chosen from a set of real cases, as shown:</p><p>• o It does not demand experience in only one contract. The information above was introduced in our model as known entities and findings. After that we queried the system to give us information about the node IsProcurementDirected(proc) for both proc1 and proc2. UnBBayes-MEBN then executed the SSBN algorithm and generated the same node structure as shown in Figure <ref type="figure" target="#fig_7">6</ref>, because both procurements have three accounting indices and information about the demanding experience in only one contract. However, as expected, the parameters and findings are different, giving different results to the query, as shown below:</p><p>• Non suspect procurement: o 0.01% that the procurement was directed to a specific company by using accounting indices; o 0.10% that the procurement was directed to a specific company.</p><p>• Suspect procurement: o 55.00% that the procurement was directed to a specific company by using accounting indices; o 29.77%, when the information about demanding experience in only one contract was omitted, and 72.00%, when it was given, that the procurement was directed to a specific company. The specialist analyzed and agreed with the knowledge generated by the probabilistic ontology reasoner developed using PR-OWL/MEBN in UnBBayes. He stated that the probabilities represent, semantically (i.e. 
high, medium, and low chance), what he would think when analyzing the same entities and findings.</p><p>Although the SSBNs generated for this proof of concept present the same structure, it is common to have a different one as the context varies from procurement to procurement. For instance, we have come across several procurements that have all four common indices and some other different ones. In this case, if there are two additional indices (ind5 and ind6), then the resulting SSBN would have two more copies for nodes IndexType(index) and IndexMinValue(index). This would make the use of BN not applicable. The ability to make multiple copies of nodes based on a context is only available in a more expressive formalism, such as MEBN. An additional capability not available with BN is to specify constraints on applicability of knowledge. Such constraints can only be implemented in a more expressive language. As we are dealing with BN formalism it is only natural to think of a formalism that extends BN. MEBN, as a Bayesian first-order logic, makes it possible to define these constraints using FOL.</p><p>Figure <ref type="figure" target="#fig_8">7</ref> presents the constraints (context nodes) necessary to model the fraud detection scenarios considered here. In this MFrag, the criterion is to identify if there is a suspicious business relationship between enterprises entA and entB. The more cases where enterprise B wins a procurement whose basic project was developed by enterprise A, the higher the chance they have some kind of personal business relationship, which means that it is more likely that enterprise A is developing the basic projects in such a way that will favor enterprise B, inhibiting the desired competition. Since the designed model is restricted to just two criteria, the team started to think about other criteria that could be incorporated and tested further. 
Figure <ref type="figure" target="#fig_9">8</ref> presents the suggested MFrag for detecting owners that act as a front to the real owner of the company (the person who really has the power to make decisions and that gets all the money), by looking up their socio-economic attributes and checking the size of the company. In other words, if a company is highly profitable, yet has an owner with little education, low income, no car, no house, etc, then the company is probably a front. From the criteria presented and modeled in this Section, we can clearly see the need for a principled way of dealing with uncertainty. But what is the role of Semantic Web in this domain? Well, it is easy to see that our domain of fraud detection is a RIS environment. The data CGU has available does not come only from its audits and inspections. In fact, much complementary information can be retrieved from other Federal Agencies, including Federal Revenue Agency, Federal Police, and others. Imagine we have information about the enterprise that won the procurement, and we want to know information about its owners, such as their personal data and annual income. This type of information is not available at CGU's Data Base (DB), but must be retrieved from the Federal Revenue Agency's DB. Once the information about the owners is available, it might be useful to check their criminal history. For that (see Figure <ref type="figure" target="#fig_10">9</ref>), information from the Federal Police must be used. In this example, we have different sources saying different things about the same person: thus, the AAA slogan applies. Moreover, there might be other Agencies with crucial information related to our person of interest; in other words, we are operating in an open world. Finally, to make this sharing and integration process possible, we have to make sure we are talking about the same person, who may (especially in case of fraud) be known by different names in different contexts.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Conclusion</head><p>The problem that CGU and many other Agencies have faced of processing all the available data into useful knowledge is starting to be solved with the use of probabilistic ontologies, as the procurement fraud detection model showed. Besides fusing the information available, the designed model was able to represent the specialist knowledge for the two real cases we evaluated. UnBBayes reasoning given the evidence and using the designed model was accurate both in suspicious and non suspicious scenarios. These results are encouraging, suggesting that a fuller development of our proof of concept system is promising.</p><p>In addition, it is fairly easy to introduce new criteria and indicators in the model in an incremental way. Thus, new rules for identifying fraud can be added without rework. After a new rule is incorporated into the model, a set of new tests can be added to the previous one with the objective of always validating the new model proposed, without doing everything from scratch.</p><p>Furthermore, the use of this formalism through UnBBayes allows advantages such as impartiality in the judgment of irregularities in procurements (given the same conditions the system will always deliver the same result), scalability (capacity to analyze thousands of procurements in a short time when compared to human capacity) and a joint analysis of large volumes of indicators (the higher the number of indicators to examine jointly the more difficult it is for the specialist analysis to be objective and consistent).</p><p>As a next step, CGU is choosing new criteria to be incorporated into the designed probabilistic ontology. This next set of criteria will require information from different Brazilian Agencies' databases. 
Therefore, the semantic power of ontologies with the uncertainty handling capability of PR-OWL will be extremely useful for fusing information from multiple databases.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Web search <ref type="bibr">[3]</ref> as the key technology of the Web is about to change radically with the development of the Semantic Web <ref type="bibr">[2]</ref>. As a consequence, the elaboration of a new search technology for the Semantic Web, called Semantic Web search <ref type="bibr">[6]</ref>, is currently an extremely hot topic, both in Web-related companies and in academic research. In particular, there is a fast growing number of commercial and academic Semantic Web search engines. The research can be roughly divided into two main directions. The first (and most common) one is to develop a new form of search for searching the pieces of data and knowledge that are encoded in the new representation formalisms of the Semantic Web (e.g., <ref type="bibr">[6]</ref>), while the second (and less explored) direction is to use the data and knowledge of the Semantic Web in order to add some semantics to Web search (e.g., <ref type="bibr">[9]</ref>).</p><p>A very promising recent representative of the second direction to Semantic Web search has been presented in <ref type="bibr">[8]</ref>. The approach is based on (i) using ontological (unions of) conjunctive queries (which may contain negated subqueries) as Semantic Web search queries, (ii) combining standard Web search with ontological background knowledge, (iii) using the power of Semantic Web formalisms and technologies, and (iv) using standard Web search engines as the main inference motor of Semantic Web search. It consists of an offline ontology compilation step, based on deductive reasoning techniques, and an online query processing step. In this paper, we propose to further enhance this approach to Semantic Web search by the use of inductive reasoning techniques for the offline ontology compilation step. To our knowledge, this is the first combination of Semantic Web search with inductive reasoning. 
The paper's main contributions can be summarized as follows:</p><p>-We develop a combination of Semantic Web search as presented in <ref type="bibr">[8]</ref> with an inductive reasoning technique (based on similarity search <ref type="bibr">[11]</ref> for retrieving the resources that likely belong to a query concept <ref type="bibr">[5]</ref>). The latter serves in an offline ontology compilation step to compute completed semantic annotations. -Importantly, the new approach to Semantic Web search can handle inconsistencies, noise, and incompleteness in Semantic Web knowledge bases, which are all very likely to occur in distributed and heterogeneous environments, such as the Web. We provide several examples illustrating this important advantage of the new approach. -We report on a prototype implementation of the new approach in the context of desktop search. We also provide very positive experimental results for the precision and the recall of the new approach, comparing it to the deductive approach in <ref type="bibr">[8]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">System Overview</head><p>The overall architecture of our Semantic Web search system is shown in Fig. <ref type="figure" target="#fig_0">1</ref>. It consists of the Interface, the Query Evaluator, and the Inference Engine (Fig. <ref type="figure" target="#fig_0">1</ref>, dark parts), where the Query Evaluator is implemented on top of standard Web Search Engines. Standard Web pages and their objects are enriched by Annotation pages, based on an Ontology. We thus assume that there are semantic annotations to standard Web pages and to objects on standard Web pages. Note that such annotations are starting to be widely available for a large class of Web resources, especially with the Web 2.0. Semantic annotations about Web pages and objects may also be automatically learned from the Web pages and the objects to be annotated (see, e.g., <ref type="bibr">[4]</ref>), and/or they may be extracted from existing ontological knowledge bases on the Semantic Web. Another important standard assumption that we make is that Web pages and their objects have unique identifiers.</p><p>For example, in a very simple scenario, a Web page i 1 may contain information about a Ph.D. student i 2 , called Mary, and two of her papers, namely, a conference paper i 3 entitled "Semantic Web search" and a journal paper i 4 entitled "Semantic Web search engines" and published in 2008. A simple HTML page representing this scenario is shown in Fig. <ref type="figure" target="#fig_1">2</ref>, left side. There may now exist one semantic annotation each for the Web page, the Ph.D. student Mary, the journal paper, and the conference paper. The annotation for the Web page may simply encode that it mentions Mary and the two papers, while the one for Mary may encode that she is a Ph.D. student with the name Mary and the author of the papers i 3 and i 4 . 
The annotation for the paper i 3 may encode that i 3 is a conference paper and has the title "Semantic Web search", while the one for the paper i 4 may encode that i 4 is a journal paper, authored by Mary, has the title "Semantic Web search engines", was published in 2008, and has the keyword "RDF". The semantic annotations of i 1 , i 2 , i 3 , and i 4 are formally expressed as the sets of axioms A i1 , A i2 , A i3 , and A i4 , respectively:</p><formula xml:id="formula_0">Ai 1 = {contains(i1, i2), contains(i1, i3), contains(i1, i4)}, Ai 2 = {PhDStudent(i2), name(i2, "mary"), isAuthorOf(i2, i3), isAuthorOf(i2, i4)}, Ai 3 = {ConferencePaper(i3), title(i3, "Semantic Web search")}, Ai 4 = {JournalPaper(i4), hasAuthor(i4, i2), title(i4, "Semantic Web search engines"), yearOfPublication(i4, 2008), keyword(i4, "RDF")}.</formula><p>(</p><formula xml:id="formula_1">)<label>1</label></formula><p>Inference Engine. Using an ontology containing some background knowledge, these semantic annotations are then further enhanced in an offline ontology compilation step, where the Inference Engine adds all properties that can be deduced from the semantic  annotations and the ontology. In <ref type="bibr">[8]</ref>, we assume a deductive such step, while here we propose and explore an inductive one. The resulting (completed) semantic annotations are then published as Web pages, so that they can be searched by standard Web search engines. 
For example, an ontology may contain the knowledge that (i) conference and journal papers are articles, (ii) conference papers are not journal papers, (iii) isAuthorOf relates scientists and articles, (iv) isAuthorOf is the inverse of hasAuthor, and (v) hasFirstAuthor is a functional binary relationship, which is formally expressed by:</p><p>ConferencePaper ⊑ Article, JournalPaper ⊑ Article, ConferencePaper ⊑ ¬JournalPaper, ∃isAuthorOf ⊑ Scientist, ∃isAuthorOf⁻ ⊑ Article, isAuthorOf⁻ ⊑ hasAuthor, hasAuthor⁻ ⊑ isAuthorOf, (funct hasFirstAuthor).</p><p>(</p><formula xml:id="formula_2">)<label>2</label></formula><p>Using this ontological knowledge, we can derive from the above annotations that the two papers i 3 and i 4 are also articles, and both authored by Mary. These resulting searchable (completed) semantic annotations of (objects on) standard Web pages are published as HTML Web pages with pointers to the respective object pages, so that they (in addition to the standard Web pages) can be searched by standard search engines. For example, the HTML pages for the completed semantic annotations of the above A i1 , A i2 , A i3 , and A i4 are shown in Fig. <ref type="figure" target="#fig_1">2</ref>, right side. Note that on the HTML page of each individual, its identifier is located beside the atomic concept below the row specifying the URIs. Practically, such an identifier may simply be the HTML address of the Web page/object's annotation page. For example, considering the HTML pages of Fig. <ref type="figure" target="#fig_1">2</ref>, the individual described by p 4 is i 4 , and the one described by p 2 is i 2 . Observe that we use a plain textual representation of the completed semantic annotations in order to allow their processing by existing standard search engines for the Web. 
It is important to point out that this textual representation is simply a list of properties, each eventually along with an identifier or a data value as attribute value, and it can thus immediately be encoded as a list of RDF triples.</p><p>Query Evaluator. The Query Evaluator (see Fig. <ref type="figure" target="#fig_0">1</ref>) reduces each Semantic Web search query of the user in an online query processing step to a sequence of standard Web search queries on standard Web and annotation pages, which are then processed by a standard Web Search Engine. The Query Evaluator also collects the results and re-transforms them into a single answer which is returned to the user. As an example of a Semantic Web search query, one may ask for all Ph.D. students who have published an article in 2008 with RDF as a keyword, which is formally expressed as follows:</p><formula xml:id="formula_3">Q(x) = ∃y (PhDStudent(x) ∧ isAuthorOf(x, y) ∧ Article(y) ∧ yearOfPublication(y, 2008) ∧ keyword(y, "RDF ")) .</formula><p>This query is transformed into the two queries Q 1 = PhDStudent AND isAuthorOf and Q 2 = Article AND "yearOfPublication 2008" AND "keyword RDF", which can both be submitted to a standard Web search engine, such as Google. The result of the original query Q is then built from the results of the two queries Q 1 and Q 2 . Note that a graphical user interface, such as the one of Google's advanced search, or even a natural language interface can help to hide the conceptual complexity of ontological queries to the user.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Semantic Web Search</head><p>We now introduce Semantic Web knowledge bases and the syntax and semantics of Semantic Web search queries to such knowledge bases. We then generalize the PageRank technique to our approach. We assume the reader is familiar with the syntax and the semantics of Description Logics (DLs) <ref type="bibr">[1]</ref>, which we use as underlying ontology languages.</p><p>Semantic Web Knowledge Bases. Intuitively, a Semantic Web knowledge base consists of a background TBox and a collection of ABoxes, one for every concrete Web page and for every object on a Web page. For example, the homepage of a scientist may be such a concrete Web page and be associated with an ABox, while the publications on the homepage may be such objects, which are also associated with one ABox each. We assume pairwise disjoint sets D, A, R A , R D , I, and V of atomic datatypes, atomic concepts, atomic roles, atomic attributes, individuals, and data values, respectively. Let I be the disjoint union of two sets P and O of Web pages and Web objects, respectively. Informally, every p ∈ P is an identifier for a concrete Web page, while every o ∈ O is an identifier for a concrete object on a concrete Web page. We assume the atomic roles links to between Web pages and contains between Web pages and Web objects. The former represents the link structure between concrete Web pages, while the latter encodes the occurrences of concrete Web objects on concrete Web pages. Informally, a Semantic Web knowledge base consists of some background terminological knowledge and some assertional knowledge for every concrete Web page and for every concrete object on a Web page. The background terminological knowledge may be an ontology from some global Semantic Web repository or an ontology defined locally by the user site. 
In contrast to the background terminological knowledge, the assertional knowledge will be directly stored on the Web (on annotation pages like the described standard Web pages) and is thus accessible via Web search engines.</p><p>Example 1. (Scientific Database). We use a DL knowledge base KB = (T , A) to specify some simple information about scientists and their publications. The sets of atomic concepts, atomic roles, atomic attributes, and data values are: A = {Scientist, Article, ConferencePaper, JournalPaper}, RA = {hasAuthor, isAuthorOf, contains}, RD = {name, title, yearOfPublication}, V = {"mary", "Semantic Web search", 2008, "Semantic Web search engines"}.</p><p>Let I = P ∪ O be the set of individuals, where P = {i 1 } is the set of Web pages, and O = {i 2 , i 3 , i 4 } is the set of Web objects on the Web page i 1 . The TBox T contains the axioms in Eq. 2. Then, a Semantic Web knowledge base is given by KB = (T , (A a ) a ∈ P∪O ), where the semantic annotations of the individuals in P ∪ O are the ones in Eq. 1.</p><p>Semantic Web Search Queries. We use unions of conjunctive queries with negated conjunctive subqueries as Semantic Web search queries to Semantic Web knowledge bases. We now first define the syntax of Semantic Web search queries and then the semantics of positive and general such queries.</p><p>Syntax. Let X be a finite set of variables. A term is either a Web page p ∈ P, a Web object o ∈ O, a data value v ∈ V, or a variable x ∈ X. An atomic formula (or atom) α is of one of the following forms: (i) d(t), where d is an atomic datatype, and t is a term; (ii) A(t), where A is an atomic concept, and t is a term; (iii) P (t, t ), where P is an atomic role, and t, t are terms; and (iv) U (t, t ), where U is an atomic attribute, and t, t are terms. An equality has the form =(t, t ), where t and t are terms. 
A conjunctive formula ∃y φ(x, y) is an existentially quantified conjunction of atoms α and equalities =(t, t ), which have free variables among x and y. Definition 2. A Semantic Web search query Q(x) is an expression n i=1 ∃y i φ i (x, y i ), where each φ i with i ∈ {1, . . . , n} is a conjunction of atoms α (also called positive atoms), negated conjunctive formulas not ψ, and equalities =(t, t ), which have free variables among x and y i , and the x's are exactly the free variables of n i=1 ∃y i φ i (x, y i ). Intuitively, Semantic Web search queries are unions of conjunctive queries, which may contain negated conjunctive queries in addition to atoms and equalities as conjuncts.</p><p>Example 2. (Scientific Database cont'd). Two Semantic Web search queries are: Q1(x) = (Scientist(x) ∧ not doctoralDegree(x, "oxford university") ∧ worksFor(x, "oxford university")) ∨ (Scientist(x) ∧ doctoralDegree(x, "oxford university") ∧ not worksFor(x, "oxford university")); Q2(x) = ∃y (Scientist(x) ∧ worksFor(x, "oxford university") ∧ isAuthorOf(x, y)∧ not ConferencePaper(y) ∧ not ∃z yearOfPublication(y, z)).</p><p>Informally, Q 1 (x) asks for scientists who are either working for oxford university and did not receive their Ph.D. from that university, or who received their Ph.D. from oxford university but do not work for it. Whereas query Q 2 (x) asks for scientists of oxford university who are authors of at least one unpublished non-conference paper. Note that when searching for scientists, the system automatically searches for all subconcepts (known according to the background ontology), such as e.g. Ph.D. students or computer scientists.</p><p>Semantics of Positive Search Queries. We now define the semantics of positive Semantic Web search queries, which are free of negations, in terms of ground substitutions via the notion of logical consequence.</p><p>A search query Q(x) is positive iff it contains no negated conjunctive subqueries. 
A (variable) substitution θ maps variables from X to terms. A substitution θ is ground iff it maps to Web pages p ∈ P, Web objects o ∈ O, and data values v ∈ V. A closed first-order formula φ is a logical consequence of a knowledge base KB = (T , (A a ) a∈P∪O ), denoted KB |= φ, iff every first-order model I of T ∪ ⋃ a∈P∪O A a also satisfies φ. </p><formula xml:id="formula_4">Q(x) = ∃y (Scientist(x) ∧ isAuthorOf(x, y) ∧ JournalPaper(y) ∧ ∃z yearOfPublication(y, z)).</formula><p>An answer for Q(x) to KB is θ = {x/i 2 }. Recall that i 2 represents the scientist Mary.</p><p>Semantics of General Search Queries. We next define the semantics of general Semantic Web search queries by reduction to the semantics of positive ones, interpreting negated conjunctive subqueries not ψ as the lack of evidence about the truth of ψ. That is, negations are interpreted by a closed-world semantics on top of the open-world semantics of DLs (we refer to <ref type="bibr">[8]</ref> for more motivation and background). </p><formula xml:id="formula_5">Q(x) = ⋁ n i=1 ∃yi φi,1(x, yi) ∧ • • • ∧ φ i,l i (x, yi) ∧ not φ i,l i +1 (x, yi) ∧ • • • ∧ not φi,m i (x, yi) ,</formula><p>an answer for Q(x) to KB is a ground substitution θ for the variables x such that KB |= Q + (xθ) and KB ⊭ Q − (xθ), where Q + (x) and Q − (x) are defined as follows:</p><formula xml:id="formula_6">Q + (x) = ⋁ n i=1 ∃yi φi,1(x, yi) ∧ • • • ∧ φ i,l i (x, yi) and Q − (x) = ⋁ n i=1 ∃yi φi,1(x, yi) ∧ • • • ∧ φ i,l i (x, yi) ∧ (φ i,l i +1 (x, yi) ∨ • • • ∨ φi,m i (x, yi)) .</formula><p>Roughly, a ground substitution θ is an answer for Q(x) to KB iff (i) θ is an answer for Q + (x) to KB , and (ii) θ is not an answer for Q − (x) to KB , where Q + (x) is the positive part of Q(x), while Q − (x) is the positive part of Q(x) combined with the complement of the negative one. Observe that both Q + (x) and Q − (x) are positive queries. 
</p><formula xml:id="formula_7">Q(x) = ∃y (Article(x) ∧ hasAuthor(x, y) ∧ name(y, "mary") ∧ not JournalPaper(x) ∧ not ∃z yearOfPublication(x, z)</formula><p>).</p><p>An answer for Q(x) to KB is given by θ = {x/i 3 }. Recall that i 3 represents an unpublished conference paper entitled "Semantic Web search". Observe that the membership axioms Article(i 3 ) and hasAuthor(i 2 , i 3 ) do not appear in the semantic annotations A a with a ∈ P ∪ O, but they can be inferred from them using the background ontology T .</p><p>Ranking Answers. As for the ranking of all answers for a Semantic Web search query Q to a Semantic Web knowledge base KB (i.e., ground substitutions for all free variables in Q, which correspond to tuples of Web pages, Web objects, and data values), we use a generalization of the PageRank technique: rather than considering only Web pages and the link structure between Web pages (expressed through the role links to here), we also consider Web objects, which may occur on Web pages (expressed through the role contains), and which may also be related to other Web objects via other roles. More concretely, we define the ObjectRank of a Web page or an object a as follows:</p><formula xml:id="formula_8">R(a) = d • P b∈Ba R(b) / N b + (1 − d) • E(a) ,</formula><p>where (i) B a is the set of all Web pages and Web objects that relate to a, (ii) N b is the number of Web pages and Web objects that relate from b, (iii) d is a damping factor, and (iv) E associates with every Web page and every Web object a source of rank.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Deductive Offline Ontology Compilation</head><p>In this section, we describe the (deductive) offline ontology reasoning step, which compiles the implicit terminological knowledge in the TBox of a Semantic Web knowledge base into explicit membership axioms in the ABox, i.e., in the semantic annotations of Web pages / objects, so that it (in addition to the standard Web pages) can be searched by standard Web search engines. For the online query processing step, see <ref type="bibr">[8]</ref>.</p><p>The compilation of TBox knowledge into ABox knowledge is formalized as follows. Given a satisfiable Semantic Web knowledge base KB = (T , (A a ) a∈P∪O ), the simple completion of KB is the Semantic Web knowledge base KB = (∅, (A a ) a∈P∪O ) such that every A a is the set of all concept memberships A(a), role memberships P (a, b), and attribute memberships U (a, v) that logically follow from T ∪ a∈P∪O A a , where A ∈ A, P ∈ R A , U ∈ R D , b ∈ I, and v ∈ V. Informally, for every Web page and object, the simple completion collects all available and deducible facts (whose predicate symbols shall be usable in search queries) in a completed semantic annotation.</p><p>Example 5. Consider the TBox T of Example 1 and the semantic annotations (A a ) a ∈ P∪O of Example 1. The simple completion contains in particular the new axioms Article(i 3 ), hasAuthor(i 3 , i 2 ), and Article(i 4 ). The first two are added to A i3 and the last one to A i4 .</p><p>As shown in <ref type="bibr">[8]</ref>, general quantifier-free search queries to a Semantic Web knowledge base KB over DL-Lite A <ref type="bibr">[10]</ref> as underlying DL can be evaluated on the simple completion of KB (which contains only compiled but no explicit TBox knowledge anymore). Similar results hold when the TBox of KB is equivalent to a Datalog program, and the query is fully general. 
Hence, the simple completion assures (i) always a sound query processing and (ii) a complete query processing in many cases. For this reason, and since completeness of query processing is actually not that much an issue in the inherently incomplete Web, we propose to use the simple completion as the basis of our Semantic Web search.</p><p>Once the completed semantic annotations are computed, we encode them as HTML pages, so that they are searchable via standard keyword search. Specifically, we build one HTML page for the semantic annotation A a of each individual a ∈ P ∪ O. That is, for each individual a, we build a page p containing all the atomic concepts whose argument is a and all the atomic roles/attributes where the first argument is a (see Section 2).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Inductive Offline Ontology Compilation</head><p>We now describe an inductive inference based on similarity search, which we propose to use instead of deductive inference for offline ontology compilation in our approach to Semantic Web search. Section 6 then summarizes the central advantages of this proposal.</p><p>Inductive Inference Based on Similarity Search. In similarity search <ref type="bibr">[11]</ref>, the basic idea is to find the most similar object(s) to a query object (i.e., the one to be classified) with respect to a similarity (or dissimilarity) measure. We review the basics of the knearest-neighbor (k-NN) method applied to the Semantic Web context <ref type="bibr">[5]</ref>. The objective is to induce an approximation for a discrete-valued target hypothesis function h : IS → V from a space of instances IS to a set of values V = {v 1 , . . . , v s } standing for the classes (concepts) that have to be predicted. Let x q be the query instance whose class-membership is to be determined. Using a dissimilarity measure, the set of the k-nearest (pre-classified) training instances relative to x q is selected: NN (x q ) = {x 1 , . . . , x k }. Hence, the k-NN algorithm approximates h for classifying x q on the grounds of the value that h is known to assume for the training instances in NN (x q ). Precisely, the value is decided by means of a weighted majority voting procedure: it is the most voted value by the instances in NN (x q ) weighted by the similarity of the neighbor individual. 
The estimate of the hypothesis function for the query individual is:</p><formula xml:id="formula_9">ĥ(x q ) := argmax v∈V k i=1 w i δ(v, h(x i )) ,<label>(3)</label></formula><p>where δ returns 1 in case of matching arguments and 0 otherwise, and, given a dissimilarity measure d, the weights w i are determined by</p><formula xml:id="formula_10">w i = 1/d(x i , x q ).</formula><p>Observe that this setting assigns to the query instance x q a value, which stands for one in a set of pairwise disjoint concepts (corresponding to the value set V ). In a multirelational setting, as those of the Semantic Web (SW) context, this assumption cannot be made in general, since it is well known that an individual may be an instance of more than one concept. The problem is also related to the closed-world assumption (CWA) usually made in the knowledge discovery context. To deal with the open-world assumption (OWA), generally adopted for the SW representations, the absence of information on whether a training instance x belongs to the extension of a query concept Q should not be interpreted negatively, as in the standard settings which adopt the CWA, rather, it should count as neutral (uncertain) information. Assuming this alternate viewpoint, the multiclass classification problem is transformed into a ternary one and the V = {+1, −1, 0} value set is adopted for the classification of an individual with respect to a query concept Q and where the three values denote, respectively, membership, non-membership, and uncertainty. Hence, the task is cast as follows: given a query concept Q, determine the membership of an instance x q through the NN procedure (see Eq. 
3) where V = {−1, 0, +1} and the hypothesis function values for the training instances are determined as:</p><formula xml:id="formula_11">h Q (x) = +1 K |= Q(x) −1 K |= ¬Q(x) 0 otherwise.</formula><p>That is, the value of h Q for the training instances is determined by logical entailment (denoted |=) of the corresponding assertion from the knowledge base. Alternatively, a look-up in the ABox of the knowledge base could be considered, thus obtaining a classification process less complex but also possibly less accurate.</p><p>For measuring the similarity between individuals, a totally semantic and language independent family of dissimilarity measures has been used <ref type="bibr">[5]</ref>. It is based on the idea of comparing the semantics of the input individuals along a number of dimensions represented by a committee of concept descriptions, say F = {F 1 , F 2 , . . . , F m }, which stands as a group of discriminating features expressed in the OWL-DL sub-language taken into account. It is formally defined as follows <ref type="bibr">[5]</ref>: Definition 5 (family of measures). Let KB = (T , A) be a knowledge base. Given a set of concept descriptions F = {F 1 , F 2 , . . . , F m }, corresponding weights w 1 , . . . , w m , and p &gt; 0, a family of dissimilarity functions d F p : Ind(A) × Ind(A) → [0, 1] is defined by:</p><formula xml:id="formula_12">∀a, b ∈ Ind(A) : d F p (a, b) := 1 |F|   |F| i=1 w i | δ i (a, b) | p   1/p</formula><p>, where the dissimilarity function δ i (i ∈ {1, . . . , m}) is defined as follows:</p><formula xml:id="formula_13">∀a, b ∈ Ind(A) : δ i (a, b) =      0 Fi(a) ∈ A ∧ Fi(b) ∈ A 1 Fi(a) ∈ A ∧ ¬Fi(b) ∈ A or ¬Fi(a) ∈ A ∧ Fi(b) ∈ A 1/2</formula><p>otherwise.</p><p>An alternative definition for the projections requires the entailment of an assertion (instance-checking) rather than the simple ABox look-up; this can make the measure more accurate yet more complex to compute. 
Moreover, using instance checking, induction is performed on top of deduction, thus making it a kind of completion of deductive reasoning.</p><p>As for the weights w i employed in the family of measures, they should reflect the impact of the single feature concept F i relative to the overall dissimilarity. This is determined by the quantity of information conveyed by a feature, which is measured as its entropy. Namely, the extension of a feature F i relative to the whole domain of objects may be probabilistically quantified as P Fi = |F i I |/|∆ I | (relative to the canonical interpretation I). This can be roughly approximated by |retrieval(F i )|/|Ind(A)|. Hence, considering also the probability P ¬Fi related to its negation and the one related to the unclassified individuals (relative to F i ), denoted P U , we may give an entropic measure for the feature: H(F i ) = − (P Fi log(P Fi ) + P ¬Fi log(P ¬Fi ) + P U log(P U )) .</p><p>The measures strongly depend on F. Here, we make the assumption that the feature-set F represents a sufficient number of (possibly redundant) features that are able to discriminate really different individuals. However, an optimal discriminating feature set could be learned <ref type="bibr">[7]</ref>. Experimentally, we obtained good results by using the very set of both primitive and defined concepts found in the knowledge base <ref type="bibr">[5]</ref>.</p><p>Measuring the Likelihood of an Answer. The inductive inference made by the procedure shown above is not guaranteed to be deductively valid. Indeed, inductive inference naturally yields a certain degree of uncertainty. So, from a more general perspective, the main idea behind the above inductive inference for Semantic Web search is closely related to the idea of using probabilistic ontologies to increase the precision and the recall of querying databases and of information retrieval in general. 
But, rather than learning probabilistic ontologies from data, representing probabilistic ontologies, and reasoning with probabilistic ontologies, we directly use the data in the inductive inference step.</p><p>In order to measure the likelihood of the decision made by the inductive procedure (individual x q belongs to the query concept denoted by value v maximizing the argmax argument in Eq. 3), given the k-nearest training individuals in NN (x q ) = {x 1 , . . . , x k }, the quantity that determined the decision should be normalized by dividing it by the sum of such arguments over the (three) possible values:</p><formula xml:id="formula_14">l(class(xq) = v|NN (xq)) = P k i=1 wi • δ(v, hQ(xi)) P v ∈V P k i=1 wi • δ(v , hQ(xi)) .<label>(4)</label></formula><p>Hence, the likelihood of the assertion Q(x q ) corresponds to the case when v = +1. The computed likelihood can be used for building a probabilistic ABox (which is a collection of pairs, each consisting of a classical ABox axiom and a probability value).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Inconsistencies, Noise, and Incompleteness</head><p>In this section, we illustrate the main advantages of using inductive reasoning in Semantic Web search, namely, that inductive reasoning (differently from deductive reasoning) can handle inconsistencies, noise, and incompleteness in Semantic Web knowledge bases, which are all very likely to occur when knowledge bases are stored in a distributed and heterogeneous fashion, like on the Web.</p><p>Inconsistencies. Since our inductive method is based on the majority vote of the individuals in the neighborhood, it may be able to give a correct classification even in the case of inconsistent knowledge bases. This aspect is illustrated by the following example. Example 6. Consider the description logic knowledge base KB = (T , A) that consists of the following TBox T and ABox A: Actually, Nick is a Professor, indeed, he is the supervisor of a PhD thesis in A. However, by human mistake, he is asserted to be a Researcher in A, and by the axiom for Researcher in T , he cannot be the supervisor of any PhD thesis. Hence, KB is inconsistent, and thus a deductive reasoner cannot answer whether Nick is a Professor or not (since everything can be deduced from an inconsistent knowledge base). On the contrary, by inductive reasoning, it is highly probable that the returned classification result is that Nick is an instance of Professor. This is because the most similar individuals are Franz, John, and Flo, and all of them vote for the concept Professor.</p><formula xml:id="formula_15">T = {Man ≡</formula><p>Noise. Inductive reasoning may also be able to give a correct classification in the presence of noise in a knowledge base (containing, e.g., incorrect concept and/or role membership assertions), which is illustrated by the following example. Again, Nick is actually a Professor, but by human mistake asserted to be a Researcher in KB . 
But due to the slightly modified axiom for Researcher, there is no inconsistency in KB anymore. By deductive reasoning, however, Nick turns out to be a Researcher, whereas by inductive reasoning, it is highly probable that the returned classification result is that Nick is an instance of Professor, as above, because the most similar individuals are Franz, John, and Flo, and all of them vote for the concept Professor.</p><p>Incompleteness. Clearly, inductive reasoning may also be able to give a correct classification in the presence of incompleteness in a knowledge base. That is, inductive reasoning is not necessarily deductively valid, and may produce new knowledge.</p><p>Example 8. Consider the description logic knowledge base KB = (T , A ), where the TBox T is as in Example 7 and the ABox A is obtained from the ABox A of Example 6 by removing the axiom Researcher(Nick). Then, the resulting knowledge base is neither inconsistent nor noisy, but it is now incomplete. Nonetheless, by the same line of argumentation as in Examples 6 and 7, it is highly probable that the classification result by inductive reasoning is that Nick is an instance of Professor.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="7">Implementation and Experiments</head><p>In this section, we describe our prototype implementation for a semantic desktop search engine. Furthermore, we report on very positive experimental results on the precision and the recall under inductively vs. deductively completed semantic annotations.</p><p>Implementation. We have implemented a prototype for a semantic desktop search engine. We have realized both a deductive and an inductive version of the offline inference step for generating the completed semantic annotation for every considered resource. The deductive version uses PELLET <ref type="foot" target="#foot_0">1</ref> , while the inductive one is based on the k-NN technique, integrated with an entropic measure, as proposed in Section 5. Specifically, each individual i of a Semantic Web knowledge base is classified relative to all atomic concepts and all restrictions ∃R − .{i} with roles R. The parameter k was set to log(|Ind(A)|), where Ind(A) stands for all individuals in the knowledge base. The simpler distances d F 1 were employed, using all the atomic concepts in the knowledge base for determining the set F.</p><p>Precision and Recall of Inductive Semantic Web Search. We next give an experimental comparison between Semantic Web search under inductive and under deductive reasoning. We do this by providing the precision and the recall of the latter vs. the former. Our experimental results with queries relative to the FINITE-STATE-MACHINE (FSM) and the SURFACE-WATER-MODEL (SWM) ontology from the Protégé Ontology Library<ref type="foot" target="#foot_1">2</ref> are summarized in Table <ref type="table" target="#tab_2">1</ref>. For example, Query (8) asks for all transitions having no target state, while Query (16) asks for all numerical models having either the domain "lake" and public availability, or the domain "coastalArea" and commercial availability. 
The experimental results in Table <ref type="table" target="#tab_2">1</ref> essentially show that the answer sets under inductive reasoning are very close to the ones under deductive reasoning.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="8">Summary and Outlook</head><p>We have presented a combination of Semantic Web search as presented in <ref type="bibr">[8]</ref> with an inductive reasoning technique, based on similarity search <ref type="bibr">[11]</ref> for retrieving the resources that likely belong to a query concept <ref type="bibr">[5]</ref>. As a crucial advantage, the new approach to Semantic Web search allows for handling inconsistencies, noise, and incompleteness, which are very likely in distributed and heterogeneous environments, such as the Web. We have also reported on a prototype implementation and very positive experimental results on the precision and the recall of the new inductive approach to Semantic Web search. </p><formula xml:id="formula_16">(x) ∧ hasStateMachineElement(x, accountDetails) 1 1 1 1 1 4 FSM State(y) ∧ StateMachineElement(x) ∧ hasStateMachineElement(x, y) 3 3 3 1 1 5 FSM Action(x) ∨ Guard(x) 12 12 12 1 1 6 FSM ∃y, z (State(y) ∧ State(z) ∧ Transition(x) ∧ source(x, y) ∧ target(x, z)) 11 2 2 1 0.18 7 FSM StateMachineElement(x) ∧ not ∃y (StateMachineElement(y) ∧ hasStateMachineElement(x, y)) 34 34 34 1 1 8 FSM Transition(x) ∧ not ∃y (State(y) ∧ target(x, y)) 0 5 0 0 1 9 FSM ∃y (StateMachineElement(x) ∧ not hasStateMachineElement(x, accountDetails) ∧ hasStateMachineElement(x, y) ∧ State(y)) 2 2 2 1 1 10 SWM Model(x) 56 56 56 1 1 11 SWM Mathematical(x) 64 64 64 1 1 12 SWM Model(x) ∧ hasDomain(x, lake) ∧ hasDomain(x, river) 9 9 9 1 1 13 SWM Model(x) ∧ not ∃y (Availability(y) ∧ hasAvailability(x, y)) 11 11 11 1 1 14 SWM Model(x) ∧ hasDomain(x, river) ∧ not hasAvailability(x, public) 2 8 0 0 0 15 SWM ∃y (Model(x) ∧ hasDeveloper(x, y) ∧ University(y)) 1 1 1 1 1 16 SWM Numerical(x) ∧ hasDomain(x, lake) ∧ hasAvailability(x, public)∨ Numerical(x) ∧ hasDomain(x, coastalArea) ∧ hasAvailability(x, commercial) 12 9 9 1 0.75</formula><p>In the future, we aim especially at extending the desktop implementation to a real Web 
implementation, using existing search engines, such as Google. Another interesting topic is to explore how search expressions that are formulated as plain natural language sentences can be translated into the ontological conjunctive queries of our approach. It would also be interesting to investigate the use of probabilistic ontologies rather than classical ones.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>In the context of reasoning in the Semantic Web (SW), a growing interest is being committed to alternative procedures extending the standard methods so that they can deal with the various facets of uncertainty related with Web reasoning <ref type="bibr">[1]</ref>. Extensions of the classic probability measures <ref type="bibr">[2]</ref> offer alternative ways to deal with inherent uncertainty of the knowledge bases (KBs) in the SW. Particularly, belief and plausibility measures adopted in the Dempster-Shafer Theory of Evidence <ref type="bibr">[3]</ref> have been exploited as means for dealing with incompleteness <ref type="bibr">[4]</ref> and also inconsistency <ref type="bibr">[5]</ref>, which may arise from the aggregation of data and metadata on a large and distributed scale. In this work we undertake again the inductive point of view. Indeed, in many SW domains a very large number of assertions can potentially be true but often only a small number of them is known to be true or can be inferred to be true. So far the application of combination rules related to the Dempster-Shafer theory has concerned the induction of metrics which are essential for all similarity-based reasoning methods <ref type="bibr">[4]</ref>. One of the applications of such measures was related to the prediction of assertions through nearest neighbor procedures. Recently a general-purpose evidential nearest neighbor procedure based on the Dempster-Shafer combination rule has been proposed <ref type="bibr">[6]</ref>. In this work this method is extended to the specific case of semantic KBs through a more epistemically appropriate combination procedure <ref type="bibr">[7]</ref>. 
In the perspective of inductive methods, the need for a definition of a semantic similarity measure for individuals arises, that is a problem that so far received less attention in the literature compared to the measures for concepts.</p><p>Recently proposed dissimilarity measures for individuals in specific languages founded in Description Logics <ref type="bibr">[8]</ref> turned out to be practically effective for the targeted inductive tasks <ref type="bibr">[9]</ref>, however they are still based on structural criteria so that they can hardly scale to more complex languages. We devised families of dissimilarity measures for semantically annotated resources, which can overcome the aforementioned limitations <ref type="bibr">[10,</ref><ref type="bibr">11]</ref>. Our measures are mainly based on the Minkowski's norms for Euclidean spaces induced by means of a method developed in the context of relational machine learning <ref type="bibr">[12]</ref>. Namely, the measures are based on the degree of discernibility of the input individuals with respect to a given context <ref type="bibr">[13]</ref> (or committee of features), which are represented by concept descriptions expressed in the language of choice.</p><p>The main contributions of this work regard the extension of a framework for the classification of individuals through a prediction procedure based on evidence theory and similarity. In particular we propose using Yager's rule of combination and exploiting the mentioned families of metrics defined for individuals in ontologies. This allows for measuring the confirmation of the truth of candidate assertions. The prediction of the values (related to class-membership or datatype and object properties) may have plenty of applications in uncertainty reasoning with ontologies.</p><p>The remainder of the paper is organized as follows. In the next section ( §2), distance measures that shall be utilized for selecting neighbor individuals are introduced. 
Then ( §3), the basics of the Dempster-Shafer theory and a nearest-neighbor procedure based on an alternative rule of combination are recalled. Hence ( §4) we present the applications of the method to the problems of determining the class- or role-membership of individuals w.r.t. given query concepts / roles as well as the prediction of fillers for datatype properties. Relevant related work is discussed in ( §5) and we conclude ( §6) proposing extensions and applications of these methods in future work.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Dissimilarity Measures for Individuals</head><p>Since the reasoning method to be presented in the following is intended to be general purpose, no specific language will be assumed in the following for resources, concepts (classes) and their properties. It suffices to consider a generic representation that can be mapped to some Description Logic language with the standard model-theoretic semantics (see <ref type="bibr">[8]</ref> for a thorough reference).</p><p>A knowledge base K = T , A comprises a TBox T and an ABox A. T is a set of axioms concerning the (partial) definition of concepts (and roles) through class (role) expressions. A contains assertions (ground facts) concerning the world state. The set of the individuals occurring in A will be denoted with Ind(A). Each individual can be assumed to be identified by its own URI (it is useful in this context to make the unique names assumption).</p><p>Similarity-based tasks, such as individual classification, retrieval, and clustering require language-independent measures for individuals whose definition can capture semantic aspects of their occurrence in the knowledge base <ref type="bibr">[10,</ref><ref type="bibr">11]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Evidential Nearest-Neighbors Classification for Inductive ABox Reasoning 29</head><p>For our purposes, we need functions to assess the similarity of individuals. However individuals do not have an explicit syntactic (or algebraic) structure that can be compared (unless one resorts to language-specific notions <ref type="bibr">[9]</ref>, such as the most specific concept <ref type="bibr">[8]</ref>). Focusing on the semantic level, the leading idea may be that similar individuals should behave similarly w.r.t. the same concepts. A way for assessing the similarity of individuals in a knowledge base can be based on the comparison of their semantics along a number of dimensions represented by a set of concept descriptions (henceforth referred to as the committee or context <ref type="bibr">[13]</ref>). Specifically, the measure may compare individuals on the grounds of their behavior w.r.t. a given context, say C = {C 1 , C 2 , . . . , C m }, which stands as a group of discriminating relevant concepts (features) expressed in the considered language. We begin with defining the behavior of an individual w.r.t. a certain concept in terms of projecting it in this dimension: Given a concept C i ∈ C, the related projection function π i : Ind(A) → {0, 1/2, 1}<ref type="foot" target="#foot_2">1</ref> is defined:</p><formula xml:id="formula_17">∀a ∈ Ind(A) π i (a) =    1 K |= C i (a) 0 K |= ¬C i (a) 1/2 otherwise</formula><p>The case of π i (a) = 1/2 corresponds to the case when a reasoner cannot give the truth value for a certain membership query. This is due to the Open World Assumption normally made in Semantic Web reasoning. Hence, as in the classic probabilistic models, uncertainty may be coped with by considering a uniform distribution over the possible cases. 
Further ways to approximate these values in case of uncertainty are investigated in <ref type="bibr">[4]</ref>.</p><p>The discernibility functions related to the context w.r.t. which two input individuals are compared are defined as follows. Given a feature concept C i ∈ C, the related discernibility function δ i : Ind(A) × Ind(A) → [0, 1] is defined as:</p><formula xml:id="formula_18">∀(a, b) ∈ Ind(A) × Ind(A) δ i (a, b) = |π i (a) − π i (b)|</formula><p>The discernibility function δ i assigns 0 if the two individuals a and b have the same behavior w.r.t. C i , that is if they are both instance of C i or both instance of ¬C i or nothing is known about this. This is because, if a and b have the same behavior w.r.t. C i then there is no other information for discriminating them.</p><p>Finally, a family of dissimilarity measures for individuals that is inspired by Minkowski's metrics can be defined <ref type="bibr">[10,</ref><ref type="bibr">11]</ref>: Let K = T , A be a knowledge base. Given a context C and a related vector of weights w, a family of dissimilarity measures {d</p><formula xml:id="formula_19">C p } p∈IN , d C p : Ind(A) × Ind(A) → [0, 1] is defined as follows: ∀(a, b) ∈ Ind(A) × Ind(A) d C p (a, b) = ( Σ Ci∈C w i δ i (a, b) p ) 1/p</formula><p>The effect of the weights is to normalize w.r.t. the other features involved. Obviously these measures are not absolute; therefore they should also be considered w.r.t. the context of choice, hence comparisons across different contexts may not be meaningful. Larger contexts are likely to decrease the measures because of the normalizing factor, yet these values are also affected by the degree of redundancy of the features employed. 
In other works the choice of the weights is done according to variance or entropy associated to the various concepts in the context <ref type="bibr">[10,</ref><ref type="bibr">11]</ref>.</p><p>Compared to other proposed measures <ref type="bibr">[14,</ref><ref type="bibr">9,</ref><ref type="bibr">15]</ref>, the presented functions do not depend on the constructors of a specific language, rather they require only (retrieval or) instance-checking for computing the projections through classmembership queries to the knowledge base. The complexity of measuring the dissimilarity of two individuals depends on the complexity of such inferences (see <ref type="bibr">[8]</ref>, Ch. 3). Note also that the projections that determine the measure can be computed (or derived from statistics maintained on the knowledge base) before the actual distance application, thus determining a speed-up in the computation of the measure. This is very important for algorithms that massively use this distance, such as instance-based methods.</p><p>One should assume that C represents a set of (possibly redundant) features that are able to discriminate individuals that are actually different. The choice of the concepts to be included (a feature selection problem <ref type="bibr">[12]</ref>) may be crucial. Therefore, specific optimization algorithms founded in randomized search have been devised which are able to find optimal choices of discriminating contexts <ref type="bibr">[10,</ref><ref type="bibr">11]</ref>. However, the results obtained so far with knowledge bases drawn from ontology libraries showed that (a selection) of the primitive and defined concepts are often sufficient to induce sufficiently discriminating measures.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Evidence-Theoretic Nearest-Neighbor Prediction</head><p>In this section the basics of the theory of evidence and combination rules <ref type="bibr">[3]</ref> are recalled then a nearest neighbor classification procedure based on the rule of combination <ref type="bibr">[6]</ref> is extended in order to perform prediction of unobserved values (related to datatype properties or also class-membership).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1">Basics of the Evidence Theory</head><p>In the Dempster-Shafer theory, a frame of discernment Ω is defined as the set of all hypotheses in a certain domain. Particularly, in a classification problem it is the set of all possible classes. A basic belief assignment (BBA) is a function m that defines a mapping m : 2 Ω → [0, 1] verifying: A⊆Ω m(A) = 1. Given a certain piece of evidence, the value of the BBA for a given set A expresses a measure of belief that is committed exactly to A. The quantity m(A) pertains only to A and does not imply any additional claims about any of its subsets. If m(A) &gt; 0, then A is called a focal element for m.</p><p>The BBA m cannot be considered a proper probability measure: it is defined over 2 Ω instead of Ω and it does not require the properties of monotone measures <ref type="bibr">[2]</ref>. The BBA m and its associated focal elements define a body of evidence, from which a belief function Bel and a plausibility function Pl can be derived as mappings from 2 Ω to [0, 1]. For a given A ⊆ Ω, the belief in A, denoted Bel(A), represents a measure of the total belief committed to A given the available evidence. Bel is defined as follows:</p><formula xml:id="formula_20">∀A ∈ 2 Ω Bel(A) = ∅ =B⊆A m(B)<label>(1)</label></formula><p>Analogously, the plausibility of A, denoted Pl(A), represents the amount of belief that could be placed in A, if further information became available. Pl is defined as follows:</p><formula xml:id="formula_21">∀A ∈ 2 Ω Pl(A) = B∩A =∅ m(B)<label>(2)</label></formula><p>It is easy to see that:</p><formula xml:id="formula_22">Pl(A) = Bel(Ω) − Bel( Ā). Moreover m(∅) = 1 − Bel(Ω) and for each A = ∅: m(A) = B⊆A (−1) |A\B| Bel(B)</formula><p>. 
Using these equations, knowing just one function among m, Bel, and Pl allows one to derive the others.</p><p>The Dempster-Shafer rule of combination <ref type="bibr">[3]</ref> is an operation for pooling evidence from a variety of sources. This rule aggregates independent bodies of evidence defined within the same frame of discernment into one body of evidence. Let m 1 and m 2 be two BBAs. The new BBA obtained by combining m 1 and m 2 using the rule of combination, m 12 , is the orthogonal sum of m 1 and m 2 . Generally, the normalized version of the rule is used: Different evidence fusion rules have been proposed <ref type="bibr">[2]</ref>. A more epistemologically sound combination rule <ref type="bibr">[7]</ref> for our purposes assigns the probability mass related to the conflict between the BBAs to the case of maximal ignorance.</p><formula xml:id="formula_23">∀A ∈ 2 Ω \ {∅} m 12 (A) = (m 1 ⊕ m 2 )(A) = B∩C=A m 1 (B) m 2 (C) 1 − B∩C=∅ m 1 (B) m 2 (C)</formula><formula xml:id="formula_24">∀A ∈ 2 Ω m 12 (A) =    B∩C=A m 1 (B) m 2 (C) A = Ω ∧ A = ∅ m 1 (Ω) m 2 (Ω) + c A = Ω 0 A = ∅</formula><p>This means that the conflict between the two sources of evidence is not hidden, but it is explicitly recognized as a contributor to ignorance. Due to the associativity and commutativity of the operations involved, it is easy to prove that the resulting combination operator is associative and commutative, and admits the vacuous BBA (Ω unique focal set) as neutral element.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">The Nearest Neighbors Procedure</head><p>Let us consider the finite set of instances X and a finite set of integers V ⊆ Z Z to be used as labels (which may correspond to disjoint classes or distinct attribute values). The available information is assumed to consist of a training set TrSet = {(x 1 , v 1 ), . . . , (x M , v M )} ⊆ Ind × V of single-labeled instances (examples). In our case, X = Ind(A), the set of individual names occurring in the ontology.</p><p>Let x q be a new individual to be classified on the basis of its nearest neighbors in TrSet. Let N k (x q ) = {(x o(j) , v o(j) ) | j = 1, . . . , k} be the set of the k nearest neighbors of x q in TrSet sorted by a function o(•) depending on an appropriate metric d which can be applied to ontology individuals (e.g. one of the measures in the family defined in the previous section §2).</p><p>Each pair (x i , v i ) ∈ N k (x q ) constitutes a distinct item of evidence regarding the value to be predicted for x q . If x q is close to x i according to d, then one will be inclined to believe that both instances are associated with the same value, while when d(x q , x i ) increases, this belief decreases and that leads to a situation of almost complete ignorance concerning the value to be predicted for x q .</p><p>Consequently, each (x i , v i ) ∈ N k (x q ) may induce a BBA m i over V which can be defined as follows <ref type="bibr">[6]</ref>:</p><formula xml:id="formula_25">∀A ∈ 2 V m i (A) =    λσ(d(x q , x i )) A = {v i } 1 − λσ(d(x q , x i )) A = V 0 otherwise<label>(3)</label></formula><p>where λ ∈]0, 1[ is a parameter and σ(•) is a decreasing function such that σ(0) = 1 and lim d→∞ σ(d) = 0 (e.g. σ(d) = exp(−γd n ) with γ &gt; 0 and n ∈ IN). The values of the parameters can be determined heuristically.</p><p>Considering each training individual in N k (x q ) as a separate source of evidence, k BBAs m j are obtained. 
These can be pooled by means of the rule of combination leading to the aggregated BBA m that synthesizes the final belief:</p><formula xml:id="formula_26">m = k j=1 m j = m 1 ⊕ • • • ⊕ m k<label>(4)</label></formula><p>In order to predict a value, functions Bel and Pl can be derived from m using the equations seen above, and the query individual x q is assigned the value in V that maximizes the belief or plausibility:</p><formula xml:id="formula_27">v q = argmax (xi,vi)∈N k (xq) Bel({v i }) or v q = argmax (xi,vi)∈N k (xq) Pl({v i })</formula><p>The former choice (select the hypothesis with the greatest degree of belief, i.e. the most credible) corresponds to a skeptical viewpoint while the latter (select the hypothesis with the lowest degree of doubt, i.e. the most plausible) is more credulous.</p><p>The degree of belief (or plausibility) of the predicted value also provides a way to compare the answers of an algorithm built on top of such an analogical procedure. This is useful for tasks such as ranking, matchmaking, etc. Finally, it is possible to combine the two measures Bel and Pl analogously to necessity (Nec) and possibility (Pos) in Possibility Theory (which can be considered a special case<ref type="foot" target="#foot_3">2</ref> of Dempster-Shafer theory). One can define a single</p><formula xml:id="formula_28">ENN k (xq, TrSet, V ) 1. Compute the neighbor set N k (xq) ⊆ TrSet. 2. for each i ← 1 to k do Compute mi (Eq. 3) 3. for each v ∈ V do</formula><p>Compute m (Eq. 4) and derive Bel and Pl (Eqs. 1-2) Compute the confirmation C (Eq. 5) from Bel and Pl 4. Select v ∈ V that maximizes C (Eq. 6). 
measure of confirmation C, ranging in [−1, +1], by means of a simple one-to-one transformation <ref type="bibr">[2]</ref>:</p><formula xml:id="formula_29">∀A ⊆ Ω C(A) = Bel(A) + Pl(A) − 1 (5)</formula><p>Hence, denoting by C the combination of Bel and Pl, the resulting rule for predicting the uncertain value for the test individual can be written as follows:</p><formula xml:id="formula_30">v q = argmax (xi,vi)∈N k (xq) C({v i })<label>(6)</label></formula><p>Summing up, the procedure is as reported in Fig. <ref type="figure" target="#fig_0">1</ref>: It is worthwhile to note that the complexity of the method is polynomial in the number of instances in the TrSet. If this set is compact and contains very prototypical individuals with plenty of related assertions, then the resulting predictions are likely to be accurate. Another source of complexity in the computations may be the number of values in V which may yield a large number of subsets 2 |V | for which BBAs are to be computed. However this depends also on the kind of problem that is to be solved (e.g. in class membership detection |V | = 2). Moreover what really matters is the number of focal sets for each BBA, which may be much less than 2 |V | .</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Assertion Prediction</head><p>The utility of the presented procedure when applied to ontology reasoning can be manifold. In the following we propose its employment in the inductive prediction of unknown values related to class-membership and datatype / object property fillers. This feature may be easily embedded in an ontology management system in order to help the knowledge engineers elicit assertions which may not be derived from the knowledge base yet can rather be made in analogy with the others <ref type="bibr">[9]</ref>.</p><p>In the following, the symbol |≈ in expressions like K |≈ α will denote the derivation of the assertion α from the knowledge base K obtained through an alternative procedure (like the evidence nearest neighbor presented in the previous section).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">Class-Membership</head><p>Let us suppose a (query) concept Q is given. In this case one may consider only examples made up of individuals with a definite class-membership leading to a binary problem with a set of values V Q = {+1, −1} denoting, resp., membership and non-membership w.r.t. the query concept. Alternatively, one may admit ternary problems with some labels set to 0 to explicitly denote an indefinite (uncertain) class-membership <ref type="bibr">[9,</ref><ref type="bibr">10]</ref>. We shall also consider the related training set TrSet Q ⊆ Ind(A) × V Q . The values of the labels v i for the training examples can be obtained through deductive reasoning (instance-checking) or specific facilities made available by the knowledge management systems <ref type="bibr">[16]</ref>.</p><p>Now to predict the class-membership value v q for some individual x q w.r.t. Q, it suffices to call the procedure ENN k (x q , TrSet Q , V Q ) and decide on the grounds of the returned value. Thus in a binary setting (V Q = {+1, −1}), one will either conclude that K |≈ Q(x q ) or K |≈ ¬Q(x q ) depending on the value that maximizes C in Eq. 6 (resp., v q = +1 or v q = −1). Moreover the value of the confirmation function which determined the returned value v q can be exploited for ranking the hits by comparing the strength of the inductive conclusions.</p><p>Adopting a ternary setting, it may turn out that the most likely value is v q = 0 resulting in an uncertain case. One may force the choice among the values of C for v q = −1 and v q = +1, e.g. when the confirmation degree exceeds some threshold.</p><p>The inductive procedure described above can be trivially exploited for performing the retrieval of a certain concept inductively. Given a certain concept Q, it would suffice to find all individuals a ∈ Ind(A) that are such that K |≈ Q(a). The hits could be returned ranked by the respective confirmation value C(+1).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">Datatype Fillers</head><p>In this case, let us suppose a certain (functional) datatype property P is given and the problem is to predict its value for a certain test individual a (which has to be supposed to be in its domain). The set of values V P may correspond to the (discrete and finite) range of the property or to its restriction to the observed values for the training instances: V P = {v ∈ range(P ) | ∃P (a, v) ∈ A}. Different settings may be devised allowing for some special value(s) denoting the case of a yet unobserved value(s) for that property.</p><p>The related training set will be some TrSet P ⊆ domain(P ) × V P , where domain(P ) ⊆ Ind(A) is the set of individual names that have a known P -value in the knowledge base. Differently from the previous problem, datatype properties generally do not have a specific intensional definition in the knowledge base (except for the specification of domain and range), hence a mere look-up in the ABox should suffice to determine the TrSet. Now to predict the value in V P of the datatype property P for some individual a, the method requires calling the procedure with ENN k (a, TrSet P , V P ). Thus in this setting, if v q is the value that maximizes Eq. 6 then we can write K |≈ P (a, v q ). Also in this case the value of the confirmation function which determined the choice of the value v q can be exploited for comparing the strength of an inductive conclusion to others.</p><p>In case of special settings with dummy values indicating unobserved values, when these are found to be the most credible among the others, a knowledge engineer should be contacted for the necessary changes to the ontology.</p><p>The inductive procedure described above can be trivially exploited for performing alternate forms of retrieval, e.g. finding all individuals with a certain value for the given property. 
Given a certain value v, it would suffice to find all individuals a ∈ Ind(A) that are such that K |≈ P (a, v). Again, the hits could be returned ranked according to the respective confirmation value C(+1).</p><p>The limitation of treating only functional datatype properties may be overcome by considering a different way to assign the probability mass to BBAs than Eq. 3, including subsets of all possible values. Examples are to be constructed accordingly (labels will be chosen in 2 V P ). Alternatively, more complex frames of discernment, e.g. Ω ′ = 2 Ω , may be adopted so as to consider sets of values as possible fillers of the property. In all such settings the computation of the BBAs and descending measures would become of course much more complex and expensive, yet clever solutions (or approximations) proposed in the literature <ref type="bibr">[6]</ref> may contribute to mitigating this problem.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3">Relationships among Individuals</head><p>In principle, a very similar setting may be used in order to establish the possibility that a certain test individual is related through some object property with some other individual <ref type="bibr">[17,</ref><ref type="bibr">18]</ref>.</p><p>Since the set Ind(A) is finite (the target is not discovering relations with unseen individuals), one may want to find all individuals that are related to a test one through some object property, say R. The problem can be decomposed into smaller ones aiming at verifying whether K |≈ R(a, b) holds:</p><formula xml:id="formula_31">for each b ∈ Ind(A) do for each a ∈ Ind(A) do TrSet ← {(x, v) | x ∈ Ind(A) \ {a}, if K |= R(x, b) then v ← +1 else v ← −1} v R b ← ENN k (a, TrSet, {+1, −1}) if v R b = +1 then return K |≈ R(a, b) else return K |≈ ¬R(a, b)</formula><p>Note that, in the construction of the training sets, the inference K |= R(x, b) may turn out to be merely an ABox lookup operation for the given assertions (when roles are not intensionally defined in a proper RBox). Conversely, if an RBox is available (sometimes as a subset of the TBox) the values of the label for the training examples can be obtained through deductive reasoning (instancechecking) or the mentioned facilities made available by advanced reasoners or knowledge management systems <ref type="bibr">[16]</ref>.</p><p>This simple setting makes a sort of closed-world assumption in the decision of the induced assertions descending from the adoption of the binary value set and the composition of the TrSet. A more cautious setting would involve a ternary value set V R = {−1, 0, +1} which allows for an explicit treatment of those individuals a for which R(a, b) is not derivable (or just absent from the ABox). The final decision on the induced conclusion has to consider also this new possibility (e.g. 
using a threshold of confirmation for accepting likely assertions).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Related Work</head><p>The proposed method is related to those approaches devised to offer alternative ways of reasoning with ABoxes for eliciting hidden knowledge (regularities) in order to complete and populate the ontology with likely assertions even in the occurrence of incorrect parts, supposing this kind of noise is not systematic.</p><p>The tasks of ontology completion and population have often been tackled through formal methods (such as formal concept analysis <ref type="bibr">[19]</ref>). Discovering new assertions (and related probabilities in a classical setting) is another related task for eliciting hidden knowledge in the ontologies. In <ref type="bibr">[18]</ref> a machine learning method is proposed to estimate the truth of statements by exploiting regularities in the data. In <ref type="bibr">[17]</ref> another statistical learning method for OWL-DL ontologies is proposed, combining a latent relational graphical model with Description Logic inference in a modular fashion. The probability of unknown role-assertions can be inductively inferred and known concept-assertions can be analyzed by clustering individuals.</p><p>Similarity-based reasoning with ontologies is the primary aim of this work which follows a number of related methods founded on dissimilarity measures for individuals in knowledge bases expressed in Description Logics <ref type="bibr">[9,</ref><ref type="bibr">10]</ref>. Mostly, they adopt some alternate form of the classic Nearest-Neighbor lazy learning scheme <ref type="bibr">[12]</ref> in order to draw inductive conclusions that often cannot be deductively entailed by the knowledge bases.</p><p>Similar approaches based on lazy learning have been proposed that adopt generalized probability theories such as the Dempster-Shafer. 
In <ref type="bibr">[6]</ref>, which was a source of inspiration for this paper, the standard rule of combination is exploited in an evidence-theoretic classification procedure where labels were not assumed to be mutually exclusive. Rules of combination had been used in <ref type="bibr">[4]</ref> in order to learn precise metrics to be exploited in a lazy learning setting like those mentioned above.</p><p>One of the most appreciated advantages of performing inductive ABox reasoning through these methods is that they can naturally handle inconsistent (and inherently incomplete) knowledge bases, especially when inconsistency is not systematic. In <ref type="bibr">[5]</ref> a method for dealing with inconsistent ABoxes populated through information extraction is proposed: it constructs ad hoc belief networks for the conflicting parts in an ontology and adopts the Dempster-Shafer theory for assessing the confidence of the resulting assertions.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Concluding Remarks and Outlook</head><p>In the line of our investigation of inductive methods for Semantic Web reasoning, we have proposed an alternative way for approximate ABox reasoning based on the nearest-neighbors analogical principle. Once neighbors of a test individual are selected through some distance measures, a combination rule descending from the Dempster-Shafer theory can fuse the evidence provided by the various neighbor individuals. We have shown how to exploit the procedure for assertion prediction problems such as determining unknown class-or role-memberships as well as attribute-values which may be the basis for many ABox inductive reasoning algorithms. The method is being implemented so to allow an extensive experimentation on real ontologies.</p><p>Special settings to accommodate cases of uncertain or unobserved values are to be investigated. One promising extension of the method concerns the possibility of considering infinite sets of values V following the studies <ref type="bibr">[20,</ref><ref type="bibr">2]</ref>. This would allow dealing with domains where the total amount of values is unknown (also due to the inherent nature of the Semantic Web). Moreover the predicted values often need not to be exclusive. Hence the prediction procedure would require an extension towards the consideration of sets of values instead of singletons.</p><p>As necessity and possibility measures are related to the belief measures (see note 2 at page 32) a natural extension may be towards the possibilistic theory and its calculus which is, in general, different from the Dempster-Shafer theory and calculus. Further possible extensions concern all other monotone measures such as the Sugeno λ-measures <ref type="bibr">[2]</ref>. 
The extension towards the Possibility Theory is interesting also because of its parallelism with modal logics <ref type="bibr">[20]</ref> and possibilistic extensions of Description Logics <ref type="bibr">[21]</ref>.  </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>�����������������������������������������������������</head><formula xml:id="formula_32">���������������������� ��� ������������������������������������������������������������������������������� ���������������������������������������� ��� ��������������������� � �� ��� � ���������������������� � ������������� � ��������������������� � ���������������� � � � ������������������ � � � ������������������������������ � � � ���������������������� ��� � ���������������������������������������������������������������������������� � � ���������������������������������������������������� � ����� � ����������������� ���������� ��� � �������������� � ����������������������������� � ����������� � � �� ��� � ������������������������������������������������������������������������������ ��� � � ����������������������������������������� � � ����������� � ������������������ � �� ��� � � ��������� � �� � � �������������� � � ������������������������������� ��� � � � � ���� � ����� �� �� ���� � ���� ���� � ����������� � ����� � � ����������� � � ��� ��� � ��������������������������������� ���� ���� ���������������������� ���������������������������������������������������������������������������������������� � ����������������� ������������������������� � ���������������������������������������������������������������������������� � ������� ������������������������������ � ��������������������������������������������������������������������������� ����������� �����������</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Logic-based languages have long been recognized as an effective means to represent information clearly, unambiguously, and in a manner that facilitates processing by machines. By far the most common logical basis for Semantic Web languages is classical first-order logic (FOL). This is no accident: its clear syntax, well-understood semantics, and complete proof theory make FOL a natural choice for computational knowledge representation and reasoning. However, FOL lacks a fundamental capability essential for semantically aware systems. As Jeffreys <ref type="bibr">[1]</ref> put it, "Traditional or deductive logic admits only three attitudes to any proposition: definite proof, disproof, or blank ignorance." An intelligent reasoner must do more: it must assess the plausibility of uncertain hypotheses, make reasonable choices when the outcome is uncertain, and use observations to improve its representation of the world. Probability is the unique plausible reasoning calculus that satisfies certain intuitively satisfying axioms of coherent reasoning (e.g., <ref type="bibr">[2]</ref>). For this reason, probability has achieved a privileged status for plausible reasoning akin to FOL's privileged status with respect to logical reasoning. The past few decades have given rise to increasingly expressive probability-based languages, as well as a host of restricted languages designed for scalability. There is increasing interest in probability for semantic web applications <ref type="bibr">[3]</ref>.</p><p>It is often taken for granted that a new kind of logic is needed to capture essential aspects of plausible reasoning: "Ordinary logic seems to be inadequate by itself to cope with problems involving beliefs. In addition a theory of probability is required" <ref type="bibr">[4]</ref>. 
Because of its built-in machinery for reasoning about functions, higher-order logic has been proposed as a natural logical basis for combining probability and logic <ref type="bibr">[5]</ref>. On the other hand, its complete proof theory makes first-order logic attractive as a computational logic. Moreover, it is attractive to use the same logic to reason both about the domain itself and about the plausibility of statements about the domain. The question thus arises of whether an adequate formalization of probability is possible within first-order logic itself.</p><p>This paper formalizes, within standard first-order logic, a probabilistic logic powerful enough to express uncertainty about arbitrary first-order sentences. By formalizing probability as an axiomatic first-order theory, probabilities can be associated coherently with arbitrary first-order sentences, with no modification of traditional first-order semantics. To stay within axiomatic first-order logic, probabilities are defined not as real numbers, but as elements of a real closed field. An axiom schema is added to the standard axioms of the probability calculus to give "logical teeth" to the idea that probability zero events do not happen. The semantics proposed here connects naturally to algorithmic notions of randomness as proposed by Kolmogorov and Martin-Löf <ref type="bibr">[6]</ref>, <ref type="bibr">[7]</ref>, as well as to Dawid's <ref type="bibr">[8]</ref> calibration criterion.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">First-Order Probability</head><p>We begin with a first-order language L used to make assertions about a domain. L includes the usual logical symbols (variables, logical connectives, universal and existential quantifiers), together with a set of domain-specific predicate, function and constant symbols. Without loss of generality, L is taken to be a traditional, untyped first-order language. <ref type="foot" target="#foot_4">1</ref> To operationalize the requirement that assertions be expressible as a finite computational structure, a knowledge base (KB) is taken to be an axiomatic theory of L. That is, a KB contains a consistent, recursive set A of sentences of L. The logical consequences of these axioms comprise a recursively enumerable set T A = Cn(A), called the theory of A. Gödel's completeness theorem implies that T A is equal to the set {σ : A ⊢ σ} of sentences provable from A.</p><p>In general, a KB may be incomplete -it need not imply a definite truth-value for every sentence. In fact, a sufficiently powerful theory is necessarily incomplete. It is useful for a reasoner to grade the plausibility of propositions it can neither prove nor disprove. Probability is a natural candidate for this purpose.</p><p>It seems reasonable to define probability as a function mapping each sentence to a real number between zero and 1, in a manner that satisfies the standard identities of probability theory, and so that sentences provable (disprovable) from A are assigned probability 1 (0). This approach, natural as it seems, runs into difficulty. The first roadblock is that in standard first-order logic, arguments of functions must be elements of the domain, not sentences or propositions. The second roadblock is that the theory of the real numbers cannot be fully characterized as an axiomatic first-order theory. 
Several authors have shown that formalizing probabilities as real numbers in the unit interval results in a theory that cannot be axiomatized, and that does not admit a complete proof theory (cf., <ref type="bibr">[10]</ref>, <ref type="bibr">[11]</ref>). On the other hand, by abandoning the requirement that probabilities be real-valued, Bacchus <ref type="bibr">[12]</ref> developed axiomatic probability logics that have a complete proof theory.</p><p>Given the mathematical impossibility of both an axiomatic first-order theory and a function mapping sentences to real numbers, which should be preferred? To answer this question, we step back to first principles, and consider fundamental requirements for a computational probabilistic logic. First, a computational logic should explicitly represent all mathematical and logical assumptions as finite computational structures accessible to an automated reasoner. Second, it should be possible for a reasoner to discover contradictions in a knowledge base, to identify when observations are inconsistent with a theory, and to prove any consequence entailed by a theory. Third, a logic for plausible reasoning must be able to associate measures of plausibility with propositions, to express degrees of plausibility intermediate between proof and disproof, and to do so in a logically coherent manner. All these requirements are met by the proposed formalism. Furthermore, the first two requirements are automatically satisfied if probability is formalized as a traditional first-order axiomatic theory, while the final requirement can be met without demanding either that probability be formalized as a function on sentences, or that probability values be real numbers. Hence, a first-order axiomatic theory is a fundamental requirement, whereas a function from sentences to real numbers is dispensable.</p><p>If probability is not a function mapping sentences to real numbers, then what is it? 
We formalize probability as a function mapping Gödel numbers to elements of a realclosed field (RCF). A RCF is the closest one can come to formalizing the real numbers within first-order logic. The real numbers are uniquely characterized up to isomorphism as an ordered field with the least upper bound property. The ordered field axioms formalize familiar properties of the real and rational numbers: addition, multiplication, additive and multiplicative inverses (hence, subtraction and division), distribution of multiplication over addition, and complete ordering. These axioms can be formalized fully in FOL. The defining property of the real numbers, that every bounded non-empty set of real numbers has a least upper bound, is not a first-order property. In a RCF, the least upper bound property holds for all definable relations. The RCF axioms are sufficient to characterize all first-order properties of the real numbers (cf., <ref type="bibr">[13]</ref>). Thus, we assume probabilities are elements of a RCF.</p><p>Gödel showed that, given a sufficiently powerful formal system, domain elements (e.g., numbers) can be associated with sentences, formulas, and proofs. This device allows indirect expression of and reasoning about logical notions such as proof and consistency, while complying with FOL's prohibition against direct reference to sentences. Defining probabilities as a mapping from Gödel numbers to elements of a RCF allows us to develop a fully first-order axiomatization of probability. We argue later that our axioms capture the essential requirements for a computational logic of plausible reasoning.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="54">K. Laskey</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">The Probability Axioms</head><p>The original language L and axioms A are augmented with additional symbols and axioms to provide the necessary logical apparatus for probabilistic reasoning. The augmented language and axioms are called L* and A*, respectively.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1">The Language</head><p>The language L* has numerical constants 0 and 1; arithmetic ordering predicate ≤; arithmetic operators + and ×; one-place predicate symbols R and N to represent real and natural numbers; and the two-place function symbol P to represent probability. In addition, there is a predicate D to represent elements of the domain; and a countable collection L 1 , L 2 , … of labels as names for individuals. If L already has mathematical symbols and mathematical axioms consistent with our probability axioms, we can make use of the existing logical machinery; otherwise new symbols are added.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">The axioms</head><p>Domain axioms. Our first step is to relativize the domain axioms to D. This is achieved through a standard syntactic translation, e.g., ∀x ϕ(x) becomes ∀x D(x)→ϕ(x), and the sentence ∃x ϕ(x) becomes ∃x D(x)∧ϕ(x) (c.f., <ref type="bibr">[9]</ref>).</p><p>Integer arithmetic. We require enough integer arithmetic to allow Gödel numbering and reasoning about provability. The following axioms, together with the RCF axioms defined below (which apply to natural numbers by virtue of inclusion) serve this purpose:</p><formula xml:id="formula_33">N1. ∀x N(x)→ R(x) N2. N(0) N3. ∀x N(x)→ N(x+1) N4. ∀x ∀y N(x) ∧ N(y) → ((x &lt; y+1) → (x ≤ y)) N5. ∀x N(x)→ ¬(x &lt; 0) N6. All universal closures of formulas N(x)→ (ϕ(0)∧∀x(ϕ(x) → ϕ(x+1))) → ∀x ϕ(x)</formula><p>, where ϕ(x) has x (and possibly other variables) free.</p><p>Real closed field axioms. As described above, probabilities are formalized as elements of a real closed field (RCF), the first-order theory of the real numbers. Axioms for a real closed field can be found in <ref type="bibr">[13]</ref>. Note that by virtue of being natural numbers, the constants 0 and 1 are also real numbers.</p><p>R1. Additive and multiplicative closure:</p><formula xml:id="formula_34">∀x ∀y R(x)∧R(y) → R(x+y) ∧ R(x ⋅ y) R2. Commutativity: ∀x ∀y R(x)∧R(y) → (x+y = y+x) ∧ (x ⋅ y = y ⋅ x) R3. Associativity: ∀x∀y∀z R(x)∧R(y)∧R(z) → (((x+y) + z = x + (y + z)) ∧ ((x ⋅ y) ⋅ z = x ⋅ (y ⋅ z))) R4. Identity: R(0) ∧ R(1) ∧ 0≠1 ∧ ( ∀x R(x) → ((x+0 = x) ∧ (x ⋅ 1 = x)) ) R5. Inverses: ∀x R(x) → ( ∃y (x + y = 0) ∧ (x≠0 → ∃y (x ⋅ y = 1) ) ) R6. Distributive Law: ∀x∀y∀z R(x)∧R(y)∧R(z) → (x ⋅ (y + z) = (x ⋅ y) + (x ⋅ z)) R7. Total order: ∀x∀y∀z R(x)∧R(y)∧R(z) → (( x ≤ y ∨ y ≤ x ) ∧ (x ≤ y ∧ y ≤ x → x=y) ∧ (x≤y ∧ y≤z → x≤z)) R8. 
Agreement of ordering with field operations: ∀x∀y∀z R(x)∧R(y)∧R(z) → ( (x≤y → x+z ≤ y+z) ∧ ((0 ≤ x ∧ 0 ≤ y) → 0 ≤ x ⋅ y) ) R9.</formula><p>First-order closure: The following axiom schema holds for all one-place formulas ϕ(x):</p><formula xml:id="formula_35">∀x (ϕ(x) → R(x)) ∧ ∃x ϕ(x) ∧ ∃y (R(y) ∧ ∀x (ϕ(x) → x ≤ y) ) → ∃y (R(y) ∧ ∀x (ϕ(x) → x ≤ y) ∧ ∀z (R(z) ∧ ∀x (ϕ(x) → x ≤ z) ↔ y ≤ z)</formula><p>Axiom schema R9 is the first-order "image" of the least upper bound axiom. It states that if ϕ(x) represents a non-empty subset of the real numbers and ϕ(x) has a real upper bound, then ϕ(x) has a real least upper bound. Tarski <ref type="bibr">[14]</ref> showed that the theory of real closed fields can be characterized as an ordered field in which every element has a square root and every polynomial of odd degree has a root. R9 covers not only relations definable in the language of the real numbers, but also any real relation definable in L*. Thus, the above axioms are stronger than the standard RCF axioms.</p><p>Probability axioms. Good <ref type="bibr">[4]</ref> stresses that probability is properly a two-place function P(E|H), taken to mean the probability that would be assigned to the proposition E if the proposition H were known to be true. Good introduces the symbol H* to denote "the usual assumptions of logic and pure mathematics," which must be taken as given in all probability assessments. He makes no attempt to decide exactly what should be assumed as part of H*, and says it is conceivable that H* cannot be expressed in a finite number of words. Because our concern is reasoning by computational agents, we depart from Good and insist that the underlying assumptions be formalized as a first-order axiomatic theory. We require that H* be expressed as a finite computational structure, with an effective procedure for generating the axioms explicitly, and an effective procedure for checking whether any given sentence is an axiom. 
In particular, we assume that H* includes the axioms N1-N5, R1-R9, and P1-P6 (below).</p><p>We introduce into L* the two-place function symbol P. The value of P represents a meaningful probability whenever the following conditions are met (i) the first argument of P is the Gödel number #σ of a sentence σ of L*; (ii) the second argument of P is the Gödel number #ϕ(x) of a one-place open formula ϕ(x) defining a relation representable in T A* = Cn(A*); and (iii) the relation represented by ϕ(x) contains the Gödel numbers of all axioms in A*. The formula ϕ(x) is used to represent the set of sentences whose Gödel numbers satisfy ϕ. P(#σ, #ϕ(x)) represents the probability of σ, given that all sentences in the set represented by ϕ(x) are true. Condition (iii) says that the domain axioms and probability axioms are taken as given. For readability, we write P(σ | ϕ) for P(#σ, #ϕ(x)) and P(σ | τ, ϕ) for P(#σ, #ψ(x)), where #ψ(x) represents the union of the relation defined by ϕ and {#τ}. That is, P(σ | τ, ϕ) represents the likelihood of σ under the assumption that τ and all sentences in the set represented by ϕ(x) are true.</p><p>With this preamble, we now present the probability axioms. The axioms are stated informally for readability; stating them formally is straightforward. The axioms are universally quantified over (Gödel numbers of) sentences σ and τ, and formulas ϕ. The first three axioms are the usual axioms for finitely additive probability. P4 formalizes Bayesian conditioning. P5 is taken from Good <ref type="bibr">[4]</ref>, and formalizes the notion that logically equivalent propositions should be interchangeable with regard to rational degrees of belief.</p><p>Some authors (including Good and de Finetti) regard finite additivity as sufficient to formalize rational degrees of belief. Other authors consider countable additivity to be essential. Because countable additivity is typically taken for granted in applications, we regard it as essential. 
However, full formalization of countable additivity is not possible within FOL, because FOL cannot express the notion of an arbitrary infinite sequence of Gödel numbers. To formalize countable additivity, we adopt a condition introduced by Gaifman <ref type="bibr">[15]</ref>. Gaifman's condition can be formalized as a first-order axiom schema. Informally, it is stated as: P6. P(∀x ψ(x) | ϕ) is the infimum of the values P(ψ(κ 1 )∧ … ∧ψ(κ n ) | ϕ), for all finite conjunctions ψ(κ 1 )∧ … ∧ψ(κ n ) of sentences, formed by substituting constant terms of L* into ψ(x). The constants κ i may be constants of the original language L, numerical constants (0 or 1), or label constants (one of the L i ). The label constants provide enough constants to cover individuals that might not otherwise be referenced explicitly.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3">Terminology</head><p>The following definitions provide some necessary terminology. Definition 1: Let L be a first-order language. A p-language L* for L is a language that augments L with symbols for domain elements, real and natural number arithmetic, and probability, as described in Section 3.1 above. A p-theory T A* for an axiomatic theory T A in L augments the axioms A of T A as described in Section 3.2 above, to include: (i) axioms relativizing axioms in A to elements of the original domain; (ii) axioms N1-N5, R1-R9, and P1-P6; and (iii) additional axioms defining a domain-specific probabilistic theory. A p-theory T A* containing only N1-N5, RCF, and P1-P6, with no domain axioms, is called the base p-theory for L.</p><p>Definition 2: Let L be a first-order language; let L* be a p-language for L. An axiomatic theory T A* of L* is probabilistically complete if it assigns a unique probability P(σ | A*) to every sentence σ of L. That is, T A* is probabilistically complete if for every sentence σ there is a unique real number p σ such that</p><formula xml:id="formula_36">T A* |− P(σ | A*) = p σ .</formula><p>A probabilistically complete p-theory assigns a single RCF element to each sentence. Incomplete p-theories give rise to interval probabilities. Some writers have advocated founding the theory of probability on interval rather than point-valued probabilities (e.g., <ref type="bibr">[4]</ref>). The possibility of incomplete p-theories is attractive when the KB designer is not able to specify a probability for every sentence. 
With the advent of first-order languages based on graphical probability models, it is now possible to define probabilistically complete p-theories suitable for many interesting problems, to develop workable knowledge engineering procedures for specifying p-theories, and to devise tractable inference and learning algorithms for p-theories.</p><p>Definition 3: Let L be a first-order language; let T be a theory of L; and let L* be a p-language for L. An axiomatic theory</p><formula xml:id="formula_37">T A* of L* corresponds to T if T |− σ implies T A* |− P(σ | A*) = 1 for any sentence σ of L. T A* corresponds strongly to T if T |− σ if and only if T A* |− P(σ | A*) = 1.</formula><p>Clearly, if the axioms A of T A are included among the axioms A* of T A* , then T A* corresponds to T A . In general, augmenting A with N1-N5, RCF, and P1-P6, will not determine a unique p-theory. If it is assumed that A incorporates all objective, incontrovertibly true domain knowledge, then adding probabilistic axioms to complete a p-theory brings subjectivity into the KB.</p><p>Of course, in actual applications, it is rarely the case that all logical axioms are incontrovertibly true assertions. More realistically, some axioms will be highly questionable; others, though quite useful, may be downright false. Axioms in real KBs are carefully engineered to be "good enough for the task." A great deal of subjective judgment goes into developing a "good enough" KB. In short, the logical axioms of a KB are often as subjective as the probabilities, and sometimes more so.</p><p>Definition 4: Let L be a first-order language, L* a p-language for L; and T A* a probabilistically complete axiomatic theory of L*. Let ϕ(x,y) be a formula of L* that functionally represents a recursive sequence of Gödel numbers of sentences of L* (i.e., for each natural number n, there is exactly one sentence σ n such that ϕ(n, #σ n ) is provable from A*). 
We say the sequence σ 1 , σ 2 , … of sentences is negligible if for every RCF element u &gt; 0 there is a natural number n such that P(σ 1 ∧ … ∧ σ n | A*) &lt; u. The sequence σ 1 , σ 2 , … is certain if for every RCF element u &gt; 0 there is a natural number n such that P(σ</p><formula xml:id="formula_38">1 ∧ … ∧σ n | A*) &gt; 1-u.</formula><p>A negligible sequence is vanishingly improbable. That is, the probabilities of its finite-length leading segments tend to zero as their lengths increase without bound. Clearly, any sequence containing a zero-probability sentence is negligible. We can define negligible or certain individual sentences or finite-length sequences of sentences in the obvious way, by appending infinitely many copies of a tautology to the end of the sequence. An individual sentence is certain if it has probability 1 and negligible if it has probability zero.</p><p>Definition 5: Let L be a first-order language; let L* be a p-language for L; let T* be a theory of L*. The core of T* is the set of sentences {σ : σ is certain under T*}.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Semantics</head><p>This section defines semantics for p-theories. Standard first-order semantics. The logic set forth in this paper is a standard, untyped first-order logic. As such, it can be given standard first-order model-theoretic semantics.</p><p>A first-order structure for a theory in L* is a pair (D, m), where D is a non-empty set called the domain of interpretation, and m assigns to each function, constant, and Standard semantics for p-theories gives rise to a seeming conflict between logical truth and probabilistic certainty: there may be logically possible sentences that have probability zero. For example, we might represent successive tosses of a symmetric die as independent and identically distributed with probability 1/6 of landing on each face. In a hypothetical infinite sequence of tosses, the frequency of tosses that land on, say, the number 2 is certain to be 1/6. Any sequence of outcomes that does not have a limiting frequency of 1/6 is negligible, in the sense of D4. Nevertheless, every sequence of outcomes is logically possible. Standard first-order semantics cannot distinguish between typical realizations of this probabilistic process (i.e., "random looking" sequences with limiting frequency 1/6) and highly atypical realizations (e.g., sequences that have the incorrect limiting frequency, or exhibit some other unusual regularity, such as a 2 on every sixth toss). Suppose the sentence σ 1 asserts that the limiting frequency is 1/6 that the die lands on 2; the sentence σ 2 asserts that every toss comes up 2; and the sentence σ 3 asserts that a 2 occurs on the first two tosses. The sentence σ 1 has probability 1; σ 2 has probability zero; and σ 3 has probability 1/36. None of these sentences is either implied by or inconsistent with the logical axioms. Traditional first-order semantics seems to provide no way to differentiate among them. 
For this reason, many authors have considered standard first-order semantics inadequate for probabilistic theories, and have turned to alternative semantics.</p><p>Measure models. A common approach to giving semantics to probabilistic logics is through a probability measure on structures. Using results from measure theory, Gaifman <ref type="bibr">[15]</ref> showed that a coherent probability assignment to quantifier-free sentences can be extended to a countably additive probability measure on a σ-algebra of subsets of {(D, m)}, the set of all structures on D = {L 1 , L 2 , …}. <ref type="foot" target="#foot_5">2</ref> In measure model semantics, a sentence is assigned probability equal to the measure of the set of models of the sentence.</p><p>Just as traditional first-order semantics is defined in terms of set theory, measure model semantics is defined in terms of measure theory. Measure theory is the branch of real analysis used to formalize probability. In measure model semantics, it is generally taken for granted that probabilities are interpreted as real numbers. As noted above, the semantic condition that probabilities must be interpreted as real numbers results in a non-axiomatizable logic that lacks a complete proof theory. On the other hand, axiomatic set theory provides sufficient mathematical machinery to prove the standard results of measure theory. If A* contains set theory axioms, then the results of measure theory hold in all models of A*. Therefore, a probabilistically complete ptheory that includes set theory axioms has a unique measure model. A probabilistically incomplete p-theory has a family of measure models, one for each distribution consistent with T A* . Any measure model assigns probability zero to the set of models of negligible sequences.</p><p>Under measure model semantics, the logical and probabilistic aspects of a theory remain semantically distinct. 
Provable sentences are true in all ordinary models of T A* , and hence have probability 1 in any measure model of T A* . Unsatisfiable sentences are false in all models and hence have probability zero in any measure model. Other than P1-P6, probabilities for other sentences are unrestricted. In particular, a sentence may provably have probability 1 and yet be false in some models.</p><p>Certainty restriction semantics. If a sentence has probability 1, then conditioning on the sentence does not change its probability or the probability of any other sentence. Furthermore, because there are only countably many sentences, we can condition on all certain sentences -the core of the p-theory -without changing any probabilities. We can use this fact to rule out negligible sentences as models of a ptheory. Note that a p-theory T A* has enough logical machinery to define a provability predicate. We can thus introduce an axiom schema that infers σ from T A* |− P(σ | A*) = 1. Adding this axiom schema excludes provably negligible sentences as models of T A* . We call this axiom schema the certainty restriction. Adding the certainty restriction schema to T A* reduces the set of models of T A* without changing either the probability of any sentence or any of the measure models consistent with T A* .</p><p>A rational agent makes no practical distinction between propositions with probability one and those provable from its knowledge base. Many texts use limiting frequencies (as well as other certain events) to define the meaning of probability statements (e.g., that "fair die" means that the limiting frequency of tosses landing on each face is 1/6). This suggests that the certainty restriction captures some aspect of the intuitive semantics of probability as it is commonly applied and understood.</p><p>Strong probabilistic semantics. The certainty restriction and the conditioning restriction can be formulated in first-order logic. 
This means that these conditions can be imposed as satisfaction criteria for p-theories without any change to first-order semantics. However, these conditions cannot capture the stronger semantic notion that infinite-length negligible sequences should not occur in models of a probabilistic theory. Strong probabilistic semantics requires that no model of a p-theory may contain all sentences in a negligible sequence of sentences.</p><p>Each negligible sequence can be identified with an effectively null binary string, as defined by Martin-Löf <ref type="bibr">[6]</ref>. Martin-Löf randomness has been studied extensively (c.f., <ref type="bibr">[7]</ref>), and is popular as a characterization of what it means for a sequence to be a typical realization of a probability distribution on sequences. Dawid's <ref type="bibr">[8]</ref> calibration criterion is closely related to Martin-Löf randomness: the set of uncalibrated sequences for a given probability distribution is effectively null for that distribution.</p><p>If the axioms A* of our p-theory are strong enough to formalize measure theory, then we can prove that the set of negligible sequences has probability zero under the measure model for A*. Thus, negligible sequences can be excluded as models of A* without changing any probabilities. However, excluding the negligible sentences as models means abandoning traditional first-order semantics, because the proposition that a sequence is negligible cannot be formalized as a recursive first-order axiom schema. As a consequence, there is no complete proof system for probabilistic logic with strong probabilistic semantics.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>K. Laskey</head><p>Frequency probability. Some authors have argued that fundamentally different kinds of probability are required to formalize different metaphysical notions such as subjective degrees of belief, long-run frequencies, physical randomness, or algorithmic randomness. Others argue that a single kind of probability is adequate for all these metaphysical positions. <ref type="bibr">Good ([4]</ref>, <ref type="bibr">[16]</ref>) and <ref type="bibr">Barnett [17]</ref> discuss the different viewpoints on this issue.</p><p>Because the same mathematics is applied to reason about all these kinds of probability, and a proliferation of different logics complicates knowledge representation and knowledge interchange, it seems reasonable to investigate whether a single computational logic might be applicable to different notions of probability.</p><p>We have argued above that p-theories can represent subjective degrees of belief about propositions that can neither be proven nor disproven. We argue that p-theories can represent long-run frequencies and physical randomness. The basic idea derives from a theorem of de Finetti <ref type="bibr">[18]</ref> stating that an infinitely exchangeable sequence of events is mathematically equivalent to one a frequentist or proponent of physical propensity would model as independent and identically distributed (iid) given an unknown parameter, together with a subjective probability distribution on values of the parameter. To the frequentist, the parameter corresponds to the unknown long-run frequency. To the propensity theorist, the parameter corresponds to the unknown propensity. 
To the strict subjectivist, the parameter is a modeling fiction that provides a parsimonious representation for an exchangeable sequence.</p><p>Infinitely exchangeable sequences in p-theories are represented as first-order axiom schemas stating that different orderings of finite-length initial segments of a sequence are equally probable. P-theories can also represent iid propositions with unknown parameters. Incomplete p-theories can be used if no subjective distribution is available (perhaps due to philosophical aversion to subjective probability) for the unknown parameter. Thus, frequency and propensity probability as well as degree of belief probability can be represented with p-theories.</p><p>Summary: Semantics for p-theories. With no change to standard first-order semantics, a complete p-theory assigns probabilities consistently to sentences of L* such that the axioms A* have probability 1. By including the certainty restriction axiom schema in A*, we can identify probability 1 with provability from A*. If A* includes set theory axioms, there is a unique measure model (probability measure over models) for each complete p-theory. Requiring that all models of a p-theory be non-negligible in the sense of Martin-Löf would take us out of the realm of first-order model theory.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Learning and Dogmatism</head><p>An attractive feature of probability theory is its inbuilt support for learning from observation. We can add any new non-negligible axiom to a p-theory, and Bayesian conditioning can be used to obtain an updated p-theory with the evidence as an axiom. As new observations accrue, we obtain a sequence of p-theories, each containing additional axioms representing new information obtained since the previous p-theory in the sequence.</p><p>Fundamental to the scientific attitude is lack of dogmatism. In our context, nondogmatism means assigning probability zero only to propositions known incontrovertibly to be false. A dogmatic theory will be overturned if it makes definite empirical predictions that turn out to be false. However, a theory can be dogmatic without ever being proven false. For example, if a theory starts out certain that a coin is fair, it can never learn that the coin is biased, no matter how many trials are observed. A theory that allows for bias will eventually become convinced that a biased coin is biased, even if it begins with a high likelihood that the coin is fair.</p><p>If we begin with an axiomatic theory T A of L, and assume that the axioms A represent a set of sentences known incontrovertibly to be true (whether by definition or by meticulous empirical observation), we would like to be able to represent a ptheory that assigns probability zero to exactly those sentences that can be disproven from A. We say such a p-theory corresponds non-dogmatically to T A . There are many reasons, including tractability, convenience of specification, economy of communication, and the like, that we might choose to represent and reason with a dogmatic theory, as long as it is judged "good enough" for the task at hand. 
But as a matter of principle, we want the capability to represent a non-dogmatic theory, even if reasoning with it is impractical.</p><p>A result of Gaifman and Snir <ref type="bibr">[11]</ref> would seem to doom any hope of finding a nondogmatic theory. They proved that, under measure-model semantics, every axiomatizable theory is dogmatic. On the other hand, Laskey <ref type="bibr">[19]</ref> described how to specify a non-dogmatic p-theory corresponding to any consistent, finitely axiomatizable firstorder theory. This apparent inconsistency is resolved by noting that Gaifman and Snir assume that all true sentences of natural number arithmetic are base axioms of the logical language, which is therefore not axiomatizable. Gaifman and Snir's no-go result does not apply to axiomatic first-order probability logics. An inevitable price of the axiomatic first-order approach, implied by Tarski's undefinability theorem, is that any complete p-theory must assign probability intermediate between 0 and 1 to some sentences in the language of arithmetic. This is natural if we view probability as a degree of provability. No first-order axiom system can prove all true sentences of arithmetic; therefore, no axiomatic first-order probability logic can assign probability 1 to all true sentences of arithmetic.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Conclusion</head><p>As probabilistic languages find increasing application to the Semantic Web (e.g., <ref type="bibr">[20]</ref>), there is a need for a logical foundation that integrates traditional SW logics with probability logic. The logic presented here formalizes probability as a standard axiomatic first-order theory, with no alteration to traditional first-order model theoretic semantics. Advantages of this approach are the ability to represent p-theories as finite computational structures amenable to machine processing, the availability of a complete proof system, and compatibility with semantics of traditional logic-based languages. While this paper focuses on expressive power of the logic, SW applications require tractability and scalability. Future work will consider tractable restrictions of the logic, as well as fast approximate inference methods.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Ontologies are key components of the Semantic Web, and among the formalisms proposed within Knowledge Engineering, the most popular ones at the moment are based on Description Logics (DLs) <ref type="bibr">[1]</ref>. There are however relatively few ontologies available, and on very few subjects. Moreover, building an ontology from scratch can be a very burdensome and difficult task; very often two domain experts design rather different ontologies for the same domain <ref type="bibr">[2]</ref>. Considerable effort is now invested into developing automated means for the acquisition of ontologies. Most of the currently pursued approaches do not use the expressive power of languages such as OWL, and are only capable of learning ontologies of restricted form, such as taxonomic hierarchies <ref type="bibr">[3]</ref>.</p><p>It is therefore natural to try to combine logic-based and probabilistic approaches to machine learning for automated ontology acquisition. Inspired by the success of Inductive Logic Programming (ILP) <ref type="bibr">[4]</ref> and statistical machine learning, in this paper we describe methods that learn ontologies in the recently proposed Probabilistic Description Logic crALC <ref type="bibr">[5]</ref>. In using statistical methods we wish to cope with the uncertainty that is inherent to real-world knowledge bases, where we commonly deal with biased, noisy or incomplete data. Moreover, many interesting research problems, such as ontology alignment and collective classification, require probabilistic inference over evidence.</p><p>Probabilistic Description Logics are closely related to Probabilistic Logics that have been extensively researched in the last decades <ref type="bibr">[6,</ref><ref type="bibr">7]</ref>. 
Some logics <ref type="bibr">[8]</ref> admit inference based on linear programming, while others resort to independence assumptions and graph-theoretical models akin to Bayesian and Markov networks.
Once these algorithms have been formally stated, we wish to explore semi-automated reasoning from a real world dataset -the Lattes curriculum platform.
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Background</head><p>Assume we are provided with a repository of HTML pages where researchers and students have stored data about publications, courses, languages, and further relational data. In order to structure such knowledge we might choose to use ontologies. We may extract concepts such as Researcher and Person, and we may establish relationships such as ⊑ among them. These concepts are often expressed using Description Logics (Section 2.1). Suppose we are not able to precisely state membership relations among concepts, but instead we can give probabilistic assessments such as P (Student|Researcher) = 0.4. Such assessments are encoded in Probabilistic Description Logics such as crALC (Section 2.2). Suppose further that we look for automated means to learn ontologies given assertions on concepts such as Student(jane); this task is commonly tackled by Description Logic Learning algorithms (Section 2.3).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1">Description Logics</head><p>Description Logics (DLs) form a family of representation languages that are typically decidable fragments of First Order Logic (FOL) with particular seman-tics <ref type="bibr">[13,</ref><ref type="bibr">14]</ref>. DLs represent knowledge in terms of objects, concepts, and roles. Each concept in the set of concepts N C = {C, D, . . .} is interpreted as a subset of a domain (a set of objects). Each role in the set of roles N R = {r, s, . . .} is interpreted as a binary relation on the domain. Individuals represent the objects through names from the set of names N I = {a, b, . . .}. Information is stored in a knowledge base divided in (at least) two parts: the TBox (terminology) and the ABox (assertions). The TBox describes the terminology by listing concepts and roles and their relationships. The ABox contains assertions about objects.</p><p>Complex concepts are built using atomic concepts, roles and constructors. Depending on the constructors involved one can obtain different expressive power and decidability properties. The semantics of a description is given by a domain ∆ and an interpretation, that is a functor • I . We refer to <ref type="bibr">[13]</ref> for further background on Description Logics.</p><p>One of the central ideas in DL is subsumption <ref type="bibr">[14]</ref>: Given two concepts descriptions C and D in T , C subsumes D denoted by C ⊒ D, iff for every interpretation I of T it holds that C I ⊇ D I . Also, C ≡ D amounts to C ⊒ D and D ⊒ C.</p><p>Subsumption is a useful inference mechanism that allow us to perform standard reasoning tasks such as instance checking and concept retrieval. Instance checking is valuable for our ILP methods because it amounts to produce classmembership assertions: K |= C(a), where K is the knowledge base, a is an individual name and C is a concept definition given in terms of the concepts accounted for in K.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2">The Logic crALC</head><p>The logics mentioned in Section 2.1 do not handle uncertainty through probabilities. It might be interesting to assign probabilities to assertions, concepts, roles; the Probabilistic Description Logic crALC does just that.</p><p>crALC is a probabilistic extension of the DL ALC. The following constructors are available in ALC: conjunction (C ⊓ D), disjunction C ⊔ D, negation (¬C), existential restriction (∃r.C), and value restriction (∀r.C). Concepts inclusions and definitions are allowed and denoted by C ⊑ D and C ≡ D, where C is a concept name. The semantics is given by a domain D and an interpretation I. A set of concept inclusions and definitions is a terminology. A terminology is acyclic if it is a set of concept inclusions/definitions such that no concept in the terminology uses itself.</p><p>A key concept in crALC is probabilistic inclusion, denoted by P (C|D) = α, where D is a concept and C is a concept name. If the interpretation of D is the whole domain, then we simply write P (C) = α. We are interested in computing a query P (A o (a 0 )|A) for an ABox A = {A j (a j )} M j=1 (this is an inference). Assume also that C in role restrictions ∃r.C and ∀r.C is a concept name. As probabilistic inclusions must only have concept names in their conditioned concept, assessments such as P (∀r.C|D) = α or P (∃r.C|D) = α are not allowed.</p><p>We assume that every terminology is acyclic; this assumption allows one to draw any terminology T as a directed acyclic graph G(T ): each concept name is a node, and if a concept C directly uses concept D, then D is a parent of C in G(T ). Each existential and value restriction is added to the graph G(T ). As each one of these restrictions directly uses r and C, the graph must contain a node for each role r, and an edge from r to each restriction directly using it. 
Each restriction node is a deterministic node in that its value is completely determined by its parents.</p><p>The semantics of crALC is based on probability measures over the space of interpretations, for a fixed domain. Inferences can be computed by a first order loopy propagation algorithm that has been shown to produce good approximations for medium size domains <ref type="bibr">[9]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3">Inductive Logic Programming and Description Logic Learning</head><p>ILP is a research field at the intersection of machine learning <ref type="bibr">[15]</ref> and logic programming <ref type="bibr">[16]</ref>. It aims at a formal framework as well as practical algorithms for inductively learning relational descriptions from examples and background knowledge. Learning is commonly regarded as a search problem <ref type="bibr">[17]</ref>; indeed, in ILP there is a space of candidate solutions, the set of "well formed" hypotheses H, and an acceptance criterion characterizing solutions. In concept-learning and ILP the search space is typically structured by means of the dual notions of generalization and specialization.</p><p>A significant algorithm for ILP is FOIL <ref type="bibr">[10]</ref>. This algorithm moves from an explicit representation of the target relation (as a set of tuples of a particular collection of constants) to a more general, functional definition that might be applied to different constants. For a particular target relation, FOIL finds clauses in FOL one at a time, removing tuples explained by the current clause before looking for the next through a cover method. FOIL uses an information-based heuristic to guide its search for simple, general clauses. Because of its simplicity and computational efficiency, we have chosen to develop a covered approach when learning Probabilistic Description Logics.</p><p>There have been notable efforts to learn ontologies in Description Logics; some of these previous results have directly inspired our work. As noted by Fanizzi et al <ref type="bibr">[14]</ref>, early work on learning in DLs essentially focused on demonstrating the PAC-learnability for various languages derived from CLASSIC. 
Many approaches to the problem of learning concept definitions in DL formalisms can be classified in two categories <ref type="bibr">[2]</ref>: in one category the problem is approached by translating it to another formalism in which concept learning has already been investigated, while in the other category the problem is approached in the original formalism.</p><p>One example of the first approach can be found in the work of Kietz <ref type="bibr">[18]</ref>, where the hybrid language CARIN-ALN is used <ref type="bibr">[19]</ref>. This language combines a complete structural subsumption service in a DL with Horn logic, where terms are individuals in the domain. Likewise, in the AL-log framework <ref type="bibr">[20]</ref>, DATA-LOG clauses are combined with ALC constructs. In the same direction, DL+log <ref type="bibr">[21]</ref> allows for the tight integration of DLs and DATALOG. Arguably, the decidable knowledge representation framework SHIQ+log <ref type="bibr">[1]</ref>, is the most powerful among the ones currently available for the integration of DLs and clausal logic. First, it relies on the very expressive DL SHIQ. Second, it allows for inducing a definition for DL concepts thus having ontology elements not only as input but also as output of the learning process.</p><p>The problem of translation turns out to be similar to an ILP problem. There are two issues to address: incompatibilities between DLs and Horn Logic, and the fact that the OWA<ref type="foot" target="#foot_6">1</ref> is used in DLs.</p><p>The other approach, solving the learning problem in the original formalism, can be found in the work of Cohen and Hirsh <ref type="bibr" target="#b93">[22]</ref>, which uses a pure DL-based approach for concept learning, in this case on the CLASSIC DL language. In these algorithms, ILP has been a significant influence, as refinement operators have been extensively explored. 
Badea and Nienhuys-Cheng <ref type="bibr" target="#b94">[23]</ref> suggest a refinement operator for the ALER description logic. They also investigate some theoretical properties of refinement operators that favour the use of a downward refinement operator to enable a top-down search.</p><p>Learning algorithms for DLs (in particular for the language ALC) were created by Iannone et al <ref type="bibr">[2]</ref> that also make use of refinement operators. Instead of using the classical approach of combining refinement operators with a search heuristic, they developed an example driven learning method. The language, called YINYANG, requires lifting the instances to the concept level through a suitable approximate operator (most specific concepts MSCs) and then start learning from such extremely specific concept descriptions. A problem of these algorithms is that they tend to produce unnecessarily long concepts. One reason is that MSCs for ALC and more expressive languages do not exist and hence can only be approximated.</p><p>These disadvantages have been partly mitigated in the work of Lehmann <ref type="bibr" target="#b95">[24]</ref>, where approximations are not needed because it is essentially based on a genetic programming procedure lying on refinement operators whose fitness is computed on the grounds of the covered instances. In the DL-LEARNER system <ref type="bibr">[3]</ref> further refinement operators and heuristics have been developed for the ALC logic.</p><p>The DL-FOIL system <ref type="bibr">[14]</ref> is a new DL version of the FOIL <ref type="bibr">[10]</ref> algorithm, that is adapted to learning the DL representations supporting the OWL-DL language. 
The main components of this new system are represented by a set of refinement operators borrowed from other similar systems <ref type="bibr">[2,</ref><ref type="bibr">3]</ref> and by a different gain function (proposed in FOIL-I <ref type="bibr" target="#b96">[25]</ref>) which must take into account the OWA inherent to DLs. In DL-FOIL, like in the original FOIL algorithm, the generalization routine computes (partial) generalizations as long as they do not cover any negative example. If this occurs, the specialization routine is invoked for solving these sub-problems. This routine applies the idea of specializing using the (incomplete) refinement operator. The specialization continues until no negative example is covered (or a limited number of them).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Probabilistic Description Logic Learning</head><p>In this section, we focus on learning DL axioms and probabilities tailored to crALC. To learn the terminology component we are inspired by Probabilistic ILP methods and thus we follow generic syntax and semantics given in <ref type="bibr">[16]</ref>. The generic supervised concept learning task is devoted to finding axioms that best represent assertions positive (covered) and negatives, in a probabilistic setting this cover relation is given by: Definition 1. (Probabilistic Covers Relation) A probabilistic covers relation takes as arguments an example e, a hypothesis H and possibly the background theory B, and returns the probability value P (e|H, B) between 0 and 1 of the example e given H and B, i.e., covers(e, H, B) = P (e|H, B).</p><p>Given Definition 1 we can define the Probabilistic DL learning problem as follows <ref type="bibr">[16]</ref>: We assume each candidate hypothesis together with the example e for the target concept as being a probabilistic variable or feature in a probabilistic model<ref type="foot" target="#foot_7">2</ref> ; according to available examples, each candidate hypothesis turns out to be true, false or unknown whether result for instance checking C(a) on K, Ind(A) is respectively true, false or unknown. The learning task is restricted to finding a probabilistic classifier for the target concept.</p><p>A suitable framework for this probabilistic setting is the Noisy-OR classifier, a probabilistic model within the Bayesian networks classifiers commonly referred to as models of independence of clausal independence (ICI) <ref type="bibr">[12]</ref>. In a Noisy-OR classifier we aim at learning a class C given a large number of attributes.</p><p>As a rule, in an ICI classifier for each attribute variable A j , j = 1, . . . , k (A denotes the multidimensional variable (A 1 , . . . , A k ) and a = (a 1 , . . . 
, a k ) the EM algorithm of the Noisy-OR classifier to estimate the class ascribed to the instance.</p><formula xml:id="formula_39">Input: a target concept C, background knowledge K = (T , A), a training set E = Ind + C (A) ∪ Ind − C (A) ⊆ Ind(A)</formula><p>In order to learn probabilities associated to terminologies obtained for the former algorithm we commonly resort to the EM algorithm. In this sense, we are influenced in several respects from approaches given in <ref type="bibr">[16]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Preliminary Results</head><p>To demonstrate feasibility of our proposal, we have run preliminary tests on relational data extracted from the Lattes curriculum platform, the Brazilian government scientific repository <ref type="foot" target="#foot_9">4</ref> . The Lattes platform is a public source of relational data about scientific research, containing data on several thousand researchers and students. Because the available format is encoded in HTML, we have implemented a semi-automated procedure to extract content. A restricted database has been constructed based on randomly selected documents. We have performed learning of axioms based on elicited asserted concepts and roles, further probabilistic inclusions have been added according to the crALC syntax. Figure <ref type="figure" target="#fig_3">3</ref> illustrates the network generated for a domain of size 2.</p><p>For instance, to properly identify a professor, the following concept description has been learned:</p><formula xml:id="formula_40">Professor ≡ Person ⊓(∃hasPublication.Publication ⊔ ∃advises.Person ⊔ ∃worksAt.Organization)</formula><p>When Person(0)<ref type="foot" target="#foot_10">5</ref> is given by evidence, the probability value P (Professor(0)) = 0.68 (we have considered a large number of professors in our experiments), as The former concept definition can conflict with standard ILP approaches, where a more suitable definition might be mostly based on conjuntions. In contrast, in this particular setting, the probabilistic logic approach has a nice and flexible behavior. However, it is worth noting that terminological constructs basically rely on the refinement operator used during learning.</p><p>Another query, linked to relational classification, allows us to prevent duplicate publications. One can be interested in retrieving the number of publications for a given research group. 
Whereas this task might seem trivial, difficulties arise mainly due to multi-authored documents. In principle, each co-author would have a different entry for the same publication in the Lattes platform, and it must be emphasized that each entry is be prone to contain errors. In this sense, a probabilistic concept for duplicate publications was learned: DuplicatePublication ≡ Publication ⊓(∃hasSimilarTitle.Publication ⊔ ∃hasSameYear.Publication ⊔hasSameType.Publication))</p><p>It clearly states that a duplicate publication is related to publications that share similar title 6 , same year and type (journal article, chapter book and so on). At first, the prior probability is low: P (DuplicatePublication(0)) = 0.05. Evidence on title similarity increases considerably the probability value: P (DuplicatePublication(0)|∃hasSimilarTitle(0, 1)) = 0.77. Further evidence on type almost guarantees a duplicate concept: P (DuplicatePublication(0)|∃hasSimilarName(1) ⊓ ∃hasSameType(1)) = 0.99.</p><p>It must be noted that title similarity does not guarantee a duplicate document. Two documents can share the same title (same author), but nothing prevents them from being published on different means (for instance, a congress paper and an extended journal article). Probabilistic reasoning is valuable to deal with such issues.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Conclusion</head><p>In this paper we have presented algorithms that perform learning of both probabilities and logical constructs from relational data for the recently proposed Probabilistic DL crALC. Learning of parameters is tackled by the EM algorithm whereas structure learning is conducted by a combined approach relying on statistical and ILP methods. We approach learning of concepts as a classification task; a Noisy-OR classifier has been accordingly adapted to do so.</p><p>Preliminary results have focused on learning a probabilistic terminology from a real-world domain -the Brazilian scientific repository. Probabilistic logic queries have been posed on the induced model; experiments suggest that our methods are suitable for learning ontologies in the Semantic Web.</p><p>Our planned future work is to investigate the scalability of our learning methods.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Many ontology definition languages have been developed to define ontologies in a formal way. Among them the OWL<ref type="foot" target="#foot_12">3</ref> which is based on crisp logic. This language suffers from its lack to represent real domains containing incomplete knowledge or uncertain information. To overcome this, an extension of the OWL seems to be a convenient solution. Many researches find this extension important and try to propose approaches for handling uncertainty in ontology field. For that purpose, two main mathematical theories have been applied: the probability theory ([2], <ref type="bibr">[7]</ref>) and the fuzzy sets theory ([4], <ref type="bibr">[6]</ref>).</p><p>However not all the problems of uncertainty lend themselves to one of these theories. We can find ourselves faced to situations where we are called to represent the total ignorance or the partial one about information concerning classes. This can be resolved by applying the Dempster-Shafer theory <ref type="bibr">[5]</ref>. At this stage, we are interested to use this theory and especially we are encouraged to work with the directed evidential networks <ref type="bibr">[1]</ref> which are viewed as effective and appropriate graphical representation for uncertain knowledge. Adding to that, the use of conditional belief functions provides a well representation of the uncertainty in the relationships among the variables of a graph.</p><p>In this position paper we present our tool BeliefOWL as an approach for extending an OWL ontology with belief functions as well as the translation of this ontology into an evidential network.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Uncertainty in OWL</head><p>The OWL is an expressive language for representing classes and the relations between them for a domain of discourse. However, the source of information itself may fail to give sufficient information about a concept. Sometimes we find ourselves unable to express the exact relation existing between classes because of incomplete knowledge about the domain of discourse or missing values. Extending OWL with uncertainty has started to receive considerable attention in recent years.</p><p>To cope with uncertain information in the OWL extension, we propose the use of the Dempster-Shafer theory <ref type="bibr">[5]</ref>. In fact, this theory allows assigning beliefs not only to a single element but to a set of elements. Furthermore, it gives the experts the possibility to represent total or partial ignorance about information concerning the classes of an ontology and the relations that may exist between them. Besides, this theory provides a method for combining several pieces of evidence from different sources to establish a new belief by using Dempster's rule of combination.</p><p>One of our goals is to translate an OWL taxonomy into a directed evidential network (DEVN). The DEVN is a model introduced in <ref type="bibr">[1]</ref> to represent knowledge under uncertainty by using belief functions. It is defined as a directed acyclic graph (DAG) where the nodes represent variables and the directed arcs linking nodes describe conditional dependence relations between these variables. These relations are expressed by conditional belief functions for each variable given its parents. Two kinds of belief functions are used to represent uncertainty in the DEVN: the prior belief function and the conditional belief function. The former concerns the root node and the latter expresses the belief function of a node given the value taken by its parents.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Presentation of the BeliefOWL</head><p>Figure 1 summarizes the different steps leading to our tool. In fact, BeliefOWL has as input an OWL ontology and as output a directed evidential network (DEVN).</p><p>Step 1: A Belief Extension to OWL: An OWL ontology can define classes, properties and individuals. In this paper we will focus on attributing belief masses to the different classes of an OWL taxonomy. For this purpose, we define some new classes able to represent and to introduce this uncertain information.</p><p>-Prior evidence: We define two classes to express the prior evidence &lt;beliefDistribution&gt; and &lt;priorBelief&gt;. The former is used to enumerate the different masses related to the different classes of an OWL taxonomy. It has an object property &lt;hasPriorBelief&gt; that specifies the relation between classes &lt;beliefDistribution&gt; and &lt;priorBelief&gt;. The latter expresses the prior evidence and has a datatype property &lt;massValue&gt; which enables assigning a mass value between 0 and 1. -Conditional evidence: It is defined through two main classes &lt;beliefDistribution&gt; and &lt;condBelief&gt;. The former is the same as in the case of prior evidence but has an object property &lt;hasCondBelief&gt;. The latter identifies the conditional evidence and has a datatype property &lt;massValue&gt;.</p><p>Step 2: Constructing an Evidential Network: Given an OWL ontology, we translate it into a DAG by specifying the different nodes to be created as well as the relations existing between these nodes. The construction of the DAG concerns some of the OWL statements, namely those related to classes.</p><p>-&lt;owl:class&gt;: It is represented as a variable node in the translated DEVN.</p><p>-&lt;rdfs:subClassOf&gt;: When a class is a subclass of another one, a directed arc is drawn from the superclass node to the child subclass node. 
-&lt;owl:disjointWith&gt;, &lt;owl:equivalentClass&gt;: When two classes are related to each other by one of these statements, a new node is created in the translated DEVN and a directed arc is drawn between the two classes and the added node. -&lt;owl:intersectionOf&gt;: A class C may be defined as the intersection of some classes C i (i = 1,. . . ,n). This can be represented in the translated DEVN by an arc from each C i to C and another one from C and each C i to a new node created for representing the intersection. -&lt;owl:unionOf&gt;: A class C may be defined as the union of some classes C i (i = 1,. . . ,n). This can be represented in the translated DEVN by an arc from C to each C i and another one from C and each C i to a new node created for representing the union.</p><p>Step 3: Evidence Attribution: Once the DAG of our network is constructed, the remaining issue is to assign masses to each node of the network. Considering the DAG that we have got, we can depict two kinds of nodes:</p><p>-ClassesNodes: are the nodes representing the different classes of our taxonomy and defined by &lt;owl:class&gt;. To this kind of node we attribute the prior belief functions and the conditional ones given in the evidential ontology. -ConstNodes: are those related to the constructors of our taxonomy without considering &lt;rdfs:subClassOf&gt; because this kind of constructor is not represented by a specific node. Concerning the constNodes, masses will be attributed according to the constructor in question. In fact, if we have a node created to depict an intersection between two classes, the mass will be attributed by applying Dempster's rule of combination. Concerning the node representing a union, the disjunctive rule of combination will be applied in that case.</p><p>Once our evidential network is constructed and the masses are assigned to each node, a propagation process can be performed.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Conclusion</head><p>In this paper, we have presented BeliefOWL, which is a new approach for representing uncertainty in an OWL ontology. We considered only the case of including uncertainty in classes. This uncertainty is modeled via the Dempster-Shafer theory of evidence. We have presented the theoretical aspects of our tool, which consists in translating an OWL ontology into a network. For this purpose, we extend the OWL ontology classes with belief masses, then we apply structural translation rules in order to get a DAG of a directed evidential network. The masses added to the ontology will be extracted and attributed to the network's class nodes.</p><p>Further work may address the properties and the individuals. The prior beliefs assigned to the different nodes of the network are given by an expert; in the future, the assignment could be done automatically through a learning process.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Almost by definition, creative knowledge discovery is difficult to automate and harder to assess objectively. By creative knowledge discovery, we mean finding previously unknown links between concepts or small "chunks" of knowledge in such a way that useful additional knowledge is generated. It can be distinguished from "standard" knowledge discovery by defining the latter as the search for explanatory and/or predictive patterns and rules in large volume data within a specific domain. For example, a knowledge discovery process might examine an ISP(internet service provider)'s customer database and determine that people who have a high monthly spend and who send more than three emails to the support centre in a single month are very likely to change to a different provider in the following month. Such knowledge is implicit within the data but is useful in predicting and understanding behaviour.</p><p>By contrast, creative knowledge discovery is more concerned with "thinking the unthought-of" and looking for new links, new perspectives, etc. Such links are often found by drawing parallels between different domains and looking to see how well those parallels hold -for example, compare the ISP example mentioned above to a hotel chain finding that regular guests who report dissatisfaction with two or more stays often cease to be regular guests unless they are tempted back by special treatment (such as complimentary room upgrades). This is a simple illustration of similar problems (losing customers) in different domains. A solution in one domain (complimentary upgrades) could inspire a solution in the second (e.g. a higher download allowance at the same price). Of course, such analogies may break down when probed too far but they often provide the creative insight necessary to spark a new solution through a new way of looking at a problem. 
In many cases, this inspiration is often referred to as "serendipity", or accidental discovery.</p><p>It is possible that many serendipitous discoveries are subsequently rationalised as the outcome of rigorous application of the scientific process. The traditional view of the scientist is as a generator and tester of hypotheses -often this is presented as an almost mechanical process and systems such as King's robot scientist <ref type="bibr">[1]</ref> take this to an extreme, using an inductive logic programming approach to systematically generate and test hypotheses in a laboratory.</p><p>In this paper we outline a project to automate creative knowledge discovery. The aim is to find parallels between different knowledge repositories -in this case, semantically annotated networks of documents or process models -in the hope of transferring useful links from one network to another. In the case of process models from different domains, the aim is to identify possible improvements in one process if its analogue in the other domain is more efficient in some way.</p><p>This work shares many of the problems faced by research into uncertainty in the semantic web -the mapping between repositories is very similar to a mapping between ontologies, and the creation of knowledge networks encounters several issues that are well-known from the semantic web, such as the need for imprecise concepts, integration of sources that represent entities and classes at different levels of detail etc. The work is at an early stage, and this paper briefly outlines (i) a possible approach to automating creativity which relies on the use of fuzzy taxonomies and (ii) preliminary work on automatic extraction of taxonomies from data; this requires a representation of uncertainty similar to that needed for the semantic web.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">A Method for Creative Knowledge Discovery</head><p>Can creativity -in this sense of suddenly making novel connections -be automated? Koestler <ref type="bibr">[2]</ref> summarised this view of creativity as follows:</p><p>"The creative act is not an act of creation in the sense of the Old Testament. It does not create something out of nothing: it uncovers, selects, re-shuffles, combines, synthesizes already existing facts, ideas, faculties, skills. The more familiar the parts, the more striking the new whole" Table <ref type="table" target="#tab_2">1</ref> -attributes of two music players (taken from <ref type="bibr">[4]</ref>)</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Conventional tape recorder</head><p>Sony Walkman big small clumsy neat records does not record plays back plays back uses magnetic tape uses magnetic tape tape is on reels tape is in cassette speakers in cabinet speakers in headphones mains electricity battery Sherwood <ref type="bibr">[3]</ref> proposes a systematic approach, in which a situation or artefact is represented as an object with multiple attributes, and the consequences of changing</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>82</head><p>attributes, removing constraints, etc. are progressively explored. For example, given an old-style reel-to-reel tape recorder as starting point, Sherwood's approach is to list some of its essential attributes, substitute plausible alternatives for a number of these attributes, and evaluate the resulting conceptual design or solution. Table <ref type="table" target="#tab_2">1</ref> shows how this could have led to the Sony Walkman in the late 70s <ref type="bibr">[4]</ref>. Again, with the benefit of hindsight the reader should be able to see that by changing magnetic tape to a hard disk and considering the way music is purchased and distributed, the same method could (retrospectively, at least) lead one to invent the iPod. Of course, having the vision to choose new attributes and the knowledge and foresight to evaluate the result is the hard part -and the creative steps are usually only obvious with hindsight. This systematic approach is ideally suited to handling data which is held in an object-attribute-value format, provided we have a means of changing/generalising attribute values. We intend to use taxonomies for this purpose, so that "sensible" changes can be made (e.g. mains, battery are both possible values for a power attribute). Representing an object O as a set of attribute-value pairs</p><formula xml:id="formula_41">O = \{ (a_i, v_i) \mid \text{attribute } a_i \text{ of object } O \text{ has value } v_i \}</formula><p>we generate a new "design"</p><formula xml:id="formula_42">O^* = \{ (a_i, T(v_i)) \}</formula><p>by changing one or more values using T, a non-deterministic transformation of a value to another value from the same taxonomy. Given sufficient time, this would simply enumerate all possible combinations of attribute values. 
We can reduce the search space by looking at the solution to an analogous problem in a different domain.</p><p>Our aim is to adapt previously developed tools for taxonomy matching <ref type="bibr">[5]</ref> so that analogies can be found; the next section briefly outlines a way to extract taxonomic structure when it is not explicitly available.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Extracting Embedded Soft Taxonomies</head><p>An ontology essentially consists of a taxonomy of concepts, one or more relations between concepts, and rules which impose constraints and allow data transformation. The idea of an ontology is central to the semantic web <ref type="bibr">[6]</ref>, although there can be a very high cost in creation and maintenance. This is reflected in practical experience -it is rare to find web-based data that is fully marked up with RDF or OWL metadata. It is far more common to encounter data that is stored in a relational database or an equivalent XML-tagged format. Such data often contains implicit taxonomies -a relational table may flatten hierarchical data into one or more attributes. For example, a film database may record genre(s) and sub-genre(s) as separate fields, hiding the hierarchical dependency. The hierarchy may be obvious to a human reader of the data, but it is invisible to the machine. Similarly, XML tags can hide structure. XML relies on human interpretation for its "semantics" -a programmer can take advantage of the fact that &lt;iPod&gt; and &lt;walkman&gt; are subtypes of &lt;musicPlayer&gt;, but a program has no way of knowing this unless it is made explicit by means of a taxonomy. Although a well-designed schema will make hierarchical structure explicit, our experience is that a significant proportion of data sources rely on programmer intuition instead.</p><p>We have investigated formal concept analysis (FCA) <ref type="bibr">[7,</ref><ref type="bibr">8]</ref> as a way of extracting hidden structure from a dataset in object-attribute-value form. In its simplest form, FCA considers a binary-valued table, where each row corresponds to an object and each column to an attribute (property). 
The extension to a fuzzy case is (relatively) straightforward, by considering a fuzzy relation R* and alpha-cuts which reduce the problem to the crisp case. A brief outline and promising initial results are given in <ref type="bibr">[9]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Applications</head><p>Two specific domains form demonstrators for this work. XML process mining algorithms exist to discover process model from log files; various additions include heuristic and fuzzy approaches to handle noisy data. Semantic processing mining involves ontology knowledge. The ProM [www.processmining.org] platform takes SA-MXML (semantic annotated mxml) files as input, where the annotation conforms to the Web Service Modelling Language. The aim of this demonstrator is to find (partial) similarities between process models in different domains, and use process simulation tools to determine whether one process can be improved by slightly altering it to match the second process more closely. The second demonstrator is based on web forum discussions and support centre documentation, and will attempt to improve the automated provision of "help" information.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Summary</head><p>This paper has briefly outlined a project to automate aspects of creative knowledge discovery. The project is in early stages. Although not a direct application of uncertain reasoning in the semantic web, it shares many of the same problems and useful cross-fertilisation of ideas should be possible.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>The need for reasoning over uncertain information within the semantic web occurs in many different situations. It can arise from intrinsic uncertainty in the world being modeled or from limitations of the sensing or reasoning agent itself (epistemic). The term uncertainty is often used to refer to many different notions including ambiguity, randomness, vagueness, inconsistency, incompleteness <ref type="bibr">[1]</ref> <ref type="bibr">[9]</ref>.</p><p>In recent years an approach to the semantic web, called linked data, has been developed and offers a promising route to practical and widespread semantic web uptake. It provides a set of design guidelines or patterns for how the semantic web technologies, and broader web architecture, can be used for sharing information. The existing guidelines and practices have no provision for representation of uncertainty; yet linked data is indeed fraught with many of these different types of uncertainty.</p><p>In this brief position paper we examine the ways in which uncertainty can occur in a linked data setting and sketch possible approaches to addressing the issues raised.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Linked data</head><p>Linked Data is a set of conventions for publishing data on the semantic web. It is based on principles outlined by Tim Berners-Lee <ref type="bibr">[2]</ref>. These principles advocate the use of http URIs for naming entities, the publication of data about these URIs using the standards (RDF, SPARQL) and inclusion of links to other URIs so that agents can discover more information. While quite simple, these guidelines, along with a growing body of practical advice <ref type="bibr">[3]</ref>, have led to publication and linking of many datasets in this form <ref type="bibr">[4]</ref>. This has resulted in high profile commercial applications such as <ref type="bibr">[5]</ref>.</p><p>While not explicitly stated, the style of linked data places an emphasis on data sharing and simplicity, with correspondingly less emphasis on depth of modeling and reasoning. Yet the intrinsic nature of the linked data approach leads to issues of uncertainty representation and reasoning. This is due to the emphasis on cross-linking multiple data sources that have been independently developed and modeled. Uncertainty can arise from the instance linking process, from the mapping between different sources' models and due to differing hidden assumptions in the underlying datasets. Yet the essence of linked data, and a large part of the reason for its uptake, is simplicity. The data is intended to be self-descriptive and accessible through simple link following and graph union or through SPARQL endpoints. Our challenge is to develop a common, easy to deploy, approach to uncertainty representation which can be applied to linked data sets without losing this simplicity.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Some sources of uncertainty in linked data applications</head><p>In this section we enumerate some key sources of uncertainty for linked data. We focus on the sources which directly result from the intrinsic nature of linked data -the cross-linking of independently developed RDF datasets.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1">Ambiguity resulting from data merging</head><p>In linked data, entities (Individuals) which co-occur with different URIs in different datasets are unified. This is achieved by publishing owl:sameAs relations between identified entities, either within the dataset or as a separate link set. The process of identifying such co-references is imperfect. Firstly, the co-references are typically found by a mixture of string matching, attribute matching, and type constraints, generally based on a statistical or machine learning algorithm <ref type="bibr">[6]</ref>. Thus co-references are only identified with some probability (or less formal heuristic weighting). Yet the asserted links are binary and the strength of association is lost. Secondly, the nature of the entities is ambiguous in some datasets. For example, Wikipedia and thus DBPedia conflate the concepts of the City Bristol in the UK and the associated Unitary Authority. A co-reference link that identified the ambiguous DBPedia concept with one that specifically denotes the Unitary Authority would be an error in general, even though it may be an acceptable approximation in some situations.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">Misalignment of precision and assumptions between merged sources</head><p>Many datasets in the linked data web publish property values for the entities they describe; for example, the population of the City of London. Yet those values are sometimes imprecise or dependent upon measurement assumptions that are not made explicit. For example, the population of a city depends on the time of the measurement, the measurement methodology and the precise definition of the boundary of the city; it is also subject to statistical uncertainty. As a result, at the time of writing, a linked data query on London returns a graph with four assertions on its population ranging from 7,700,000 to 8,500,000. One of these sources of variation, the time of measurement, is sometimes made explicit in data and indeed one of the four assertions is (indirectly) time qualified. However, such contextual qualification is not consistently available and, in any case, only accounts for one source of variation. Thus when datasets are linked the resulting union will often have multiple conflicting values for supposedly functional properties.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3">Misalignment of models</head><p>When linking datasets we also want to map the associated ontologies. This process is just as error-prone as entity co-reference since the axiomatization of concepts in the ontologies is rarely so complete as to allow an unambiguous mapping. Errors in the ontology mapping can lead to global effects such as unexpected identification of related concepts. Determining and publishing such alignment errors is the subject of considerable research and is outside the scope of this paper.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.4">Absence of source reliability information</head><p>Separate from the uncertainty arising from combination and linking of datasets then the datasets themselves can be uncertain or contain errors (either accidental or malicious). While this is true in general in the semantic web, the linked data approach implies broad cross linking with no provision for narrow scoping of link references. This exacerbates the problems of the veracity or trustworthiness of included datasets.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Mitigation approaches</head><p>We now discuss approaches to mitigate the effects of these uncertainty sources on the consumers of linked data. In keeping with linked data methodology we seek simple, broadly applicable, design patterns. In particular, we suggest the need for design patterns for making the uncertainty inherent in the linked datasets more explicit, and mechanisms to enable selective combination of datasets (so that problematic values or links can be omitted). In this short position paper we only sketch the suggested approaches as a basis for discussion in the workshop.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">Link vocabulary</head><p>The link vocabulary would provide a common representation for co-reference links, enabling publication of the link certainty information on which per-link inclusion decisions can be made. This can be achieved by extending the voiD ontology <ref type="bibr">[8]</ref> with a concept UncertainLinkSet (as a subclass of void:LinkSet), and associated properties for describing the method used for deriving the link set. The UncertainLinkSet itself would contain n-ary relations (WeightedLink) comprising the link and associated link weight. Different subclasses of WeightedLink indicate different interpretations of the link weight (such as probabilistic or ad hoc).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">Imprecise value vocabulary</head><p>The imprecise value vocabulary would provide a common representation for imprecise values that arise from data set merger, as discussed in 3.2. This would allow republication of merged datasets which explicitly show the variation in source data values. Returning to our example of the population of London the merged set might look like: </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3">Override graphs</head><p>Finally we suggest the need for override graphs so that one agent can publish retractions and overrides to the link assertions or data assertions made by another.</p><p>The current approach to this, in linked data applications, is to partition data and link sets into named graphs <ref type="bibr">[7]</ref>. For example, rather than include all the co-reference links directly in the same graph as the entity descriptions, we partition them into a separate named graph. In this way a RESTful access can see the union of the relevant graphs but a SPARQL endpoint can support selection of which graphs to include. This allows agents to avoid selected link sets or sub-sources but only at the grain size of the entire graph. To overcome this limitation we suggest extending the VoiD vocabulary to include graph combinators difference, union and replace. So one source can decide which subsets of the data and links to trust, and can then publish the assumptions it is making as a set of deltas over the source graphs. The difference graphs enable per-link and per-assertion changes to be expressed even if the underlying source only publishes the link set or data assertions as monolithic graphs.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Discussion</head><p>Of the issues in section 3 we have suggested an agenda for how to address some of them. The link and imprecise value vocabularies enable publication of link uncertainty (3.1) and value ambiguity (3.2) information in linked data sets. The vocabularies themselves would not remove the uncertainties, nor the problems of estimating them. However, simply having a means to publish this information is already a step forward. The suggested graph combinators would enable an agent to make and publish more selective data combinations, based on its interpretation of link strengths and data values. This does not solve the problems of deciding which parts of which sources to trust, but it does enable more effective sharing of such decisions.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. PR-OWL main concepts.</figDesc><graphic coords="12,172.88,193.27,261.37,85.44" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Fig. 2 .</head><label>2</label><figDesc>Fig. 2. Procurement fraud detection overview.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 2</head><label>2</label><figDesc>Figure 2 presents an overview of the procurement fraud detection process. The data for our case study represent several requests for proposal and auctions that are issued</figDesc><graphic coords="13,169.76,463.29,267.61,164.41" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Fig. 3 .</head><label>3</label><figDesc>Fig. 3. ProcurementRequirement MFrag.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Fig. 4 .</head><label>4</label><figDesc>Fig. 4. DirectingProcurementByIndexes MFrag.</figDesc><graphic coords="15,151.76,227.60,303.62,79.20" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Fig. 5 .</head><label>5</label><figDesc>Fig. 5. DirectingProcurement MFrag.</figDesc><graphic coords="15,164.96,461.13,277.21,83.28" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head></head><label></label><figDesc>Suspect procurement (proc1): o ind1 = ILC &gt;= 2.0; o ind2 = ILG &gt;= 1.5; o ind3 = Other &gt;= 3.0. o It demands experience in only one contract. • Non suspect procurement (proc2): o ind4 = IE &gt;= 1.0; o ind5 = ILG &gt;= 1.0; o ind6 = ILC &gt;= 1.0;</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head>Fig. 6 .</head><label>6</label><figDesc>Fig. 6. Generated SSBN for query IsProcurementDirected(proc1).</figDesc><graphic coords="16,136.16,401.13,333.14,177.61" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head>Fig. 7 .</head><label>7</label><figDesc>Fig. 7. EnterpriseBusinessNetwork MFrag.</figDesc><graphic coords="17,136.16,227.60,333.62,84.72" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_9"><head>Fig. 8 .</head><label>8</label><figDesc>Fig. 8. OwnerFront MFrag.</figDesc><graphic coords="17,140.24,512.97,326.42,78.24" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_10"><head>Fig. 9 .</head><label>9</label><figDesc>Fig. 9. Knowledge fusion from different Government Offices DBs.</figDesc><graphic coords="18,191.85,193.03,223.21,172.33" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_11"><head>Fig. 2 .</head><label>2</label><figDesc>Fig. 2. Left side: HTML page p; right side: four HTML pages p1, p2, p3, and p4, which encode (completed) semantic annotations for p and the objects on p.</figDesc><graphic coords="23,130.48,263.30,142.87,125.55" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_12"><head>Definition 1 .</head><label>1</label><figDesc>A semantic annotation A a for a Web page or object a ∈ P ∪ O is a finite set of concept membership axioms A(a), role membership axioms P (a, b), and attribute membership axioms U (a, v), where A ∈ A, P ∈ R A , U ∈ R D , b ∈ I, and v ∈ V. A Semantic Web knowledge base KB = (T , (A a ) a ∈ P∪O ) consists of a TBox T and one semantic annotation A a for every Web page and object a ∈ P ∪ O.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_13"><head>Definition 3 .</head><label>3</label><figDesc>Given a Semantic Web knowledge base KB and a positive Semantic Web search query Q(x), an answer for Q(x) to KB is a ground substitution θ for the variables x (which are exactly the free variables of Q(x)) with KB |= Q(xθ). Example 3. (Scientific Database cont'd). Consider the Semantic Web knowledge base KB of Example 1 and the following positive Semantic Web search query, asking for all scientists who author at least one published journal paper:</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_14"><head>Definition 4 .</head><label>4</label><figDesc>Given a Semantic Web knowledge base KB and search query</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_15"><head>Example 4 .</head><label>4</label><figDesc>(Scientific Database cont'd). Consider the Semantic Web knowledge base KB = (T , (A a ) a∈P∪O ) of Example 1 and the following general Semantic Web search query, asking for Mary's unpublished non-journal papers:</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_16"><head>Example 7 .</head><label>7</label><figDesc>Consider the description logic knowledge base KB = (T , A), where the ABox A is as in Example 6 and the TBox T is obtained from the TBox T of Example 6 by replacing the axiom for Researcher by the following axiom: Researcher ≡ GraduatePerson ⊓ ∃worksFor.ResearchInstitute .</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_17"><head>(and m 12</head><label>12</label><figDesc>(∅) = 0) where the denominator (1 − c) normalizes the values of the combined BBA w.r.t. the amount of conflict c between m 1 and m 2 .</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_18"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. The evidence nearest neighbor procedure.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_19"><head></head><label></label><figDesc>P1. 0 ≤ P(σ | ϕ) ≤ 1. P2. If A* ⊢ σ, then P(σ | A*) = 1. P3. If P(σ∧τ | ϕ) = 0, then P(σ∨τ | ϕ) = P(σ | ϕ) + P(τ | ϕ). P4. P(σ∧τ | ϕ) = P(σ | τ, ϕ) × P(τ | ϕ). P5. If σ↔τ, then P(σ | ϕ) = P(τ | ϕ), and P(γ | σ, A*) = P(γ | τ, A*) for all γ.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_20"><head></head><label></label><figDesc>relation symbol of L* a function, constant or relation of the correct arity on D. A structure (D, m) implies a truth-value for each sentence of L*. (D, m) is called a model of T* if every sentence in T* is true in (D, m). Models of T* are sometimes called possible worlds for T*. A sentence σ is implied by A* if it is true in all models of A*, and satisfiable if it has a model.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_21"><head>Definition 2 .</head><label>2</label><figDesc>(The Probabilistic DL Learning Problem) Given a set E = E p ∪ E i of observed and unobserved examples E p and E i (with E p ∩ E i = ∅) over the language L E , a probabilistic covers relation covers(e, H, B) = P (e|H, B), a logical language L H for hypotheses, and a background theory B, find a hypothesis H * such that H * = arg max H score(E, H, B) and the following constraints hold: ∀e p ∈ E p : covers(e p , H * , B) &gt; 0 and ∀e i ∈ E i : covers(e i , H * , B) = 0. The score is some objective function, usually involving the probabilistic covers relation of the observed examples such as the observed likelihood ep∈Ep covers(e p , H * , B) or some penalized variant thereof. Negative examples conflict with the usual view on learning examples in statistical learning. Therefore, when we speak of positive and negative examples we are referring to observed and unobserved ones. As we focus on crALC, B = K = (T , A), and given a target concept C, E = Ind + C (A) ∪ Ind − C (A) ⊒ Ind(A), are positive and negative examples or individuals. For instance, candidate hypotheses can be given by C ⊒ H 1 , . . . , H k , where H 1 = B ⊓ ∃D.⊤, H 2 = A ⊔ E, . . ..</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_22"><head>Fig. 3 .</head><label>3</label><figDesc>Fig. 3. Relational Bayesian network for the Lattes curriculum dataset.</figDesc><graphic coords="78,177.99,116.17,259.06,159.01" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_23"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. BeliefOWL Framework</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_24"><head>:</head><label></label><figDesc>London :population [a :ImpreciseValue; :samplevalue [:value 7700000; :source :s1; :context :y2009] :samplevalue [:value 7900000; :source :s2; :context :y2008] :estimatedValue 785123]</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head>Table 1 .</head><label>1</label><figDesc>C. d'Amato, N. Fanizzi, B. Fazzinga, G. Gottlob, and T. Lukasiewicz Precision and recall of inductive vs. deductive Semantic Web search.</figDesc><table><row><cell>Onto-Query</cell><cell cols="4">No. Results No. Results No. Correct Results Precision</cell><cell>Recall</cell></row><row><cell>logy</cell><cell cols="2">Deduction Induction</cell><cell>Induction</cell><cell cols="2">Induction Induction</cell></row><row><cell>1 FSM State(x)</cell><cell>11</cell><cell>11</cell><cell>11</cell><cell>1</cell><cell>1</cell></row><row><cell>2 FSM StateMachineElement(x)</cell><cell>37</cell><cell>37</cell><cell>37</cell><cell>1</cell><cell>1</cell></row><row><cell>3 FSM Composite</cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_3"><head></head><label></label><figDesc>���� �������� ����� ������� ���� ����������� ������ ���� ������������� ��������� ���� �������������� ����������� ����������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������� ���� ��������� ���� ��������������� ��� ����� ������� ���� �������� ��� ����������� ��� �������� ��� ���� ����� ������� ������������ ������������ ��� �������������� ����� ����� ����� ������� ���� ��������� ��� ��������� ����������� ����� ��� ����������������������������������������������������������������������������������������������������������� ���������� ������ ������������ ���� ��� ����������� �������� ���� ���������� ��������� ��� ������ ������������� ����� ������������������������������������������������������������������������������������� � �������������������� ������ ������ ��� �� �������� ������ ����� ��������� ����� ������ ��� ���� ����� ��� ����� ������ ���� ������������ ����� ������ ��� ���� ������������ ��� ����� ��� ��������� ��� ��� �������� ��� ���� ������������ ������������� �������� ������������������������������������������������������������������������������������������������������������� ��� ����� ��������� ����������� ������������ ��� ���� �������� �������� ���� ��������� ���� ��� ���� ��� ����� ���� ����������������� � ������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������� ��� �� ��������� ��� ����� ��� �� ���������� ������ ������� ������������ ��� ���� �������������� ��������� ���� ���������� ��� ���� ������������ ������� ����������� ������������� ��������� ��������� 
������� ������ ������ �������� ������� ����� ��������� �������� ����� �������������� ��� ������������ ������ ������ ���������������� ��� ������� ��� ������ ����� ���� ���� ������������ �������� �������������� ���� ������ �������� ���� ����� ������ ������������ ����������� �� ������� ��� ���������� ���� ������ ������������ ������ ������ ����������������������������������������������������������������������������� �� ���� ����� ��� �������������� ������������ ������ ��� ������������ ������ ����������� ��� ����� ���� �������������� �������� ������ ���� ������������ ���� �������������� ��������� ��� ������ ����������� ���� ����� ��� ���� ����������� ��� ������������������������������������������������������������������������������������������������������������ ����������������������������������������������������������������������������������������������������������������� ����������� ������� ����� ���� ���� ��� �� ����������������������� ������ ������������ ����� ���� �������� ��������� ���������������������������������������������������������������������������������������������������������� ��� ������� �� ������� �������������� ��� ��������� ������ ���� ���� �������� ��� ���������� ����� ����� ����� ���������� ���������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������� ��� ����� ����� ������ �� ������������ ������������ ������ ������� ���� ���� ������ ��� ������� �������� ������ ���� �� ������������������������������������������������������������������������������������������������������ � ����� � � �� ��� ��������� ��� ��� � �� � �� �� ��� � ��� � ��������� ���� ��������� ����������� ������ �� ����������� ������� ��� ���������������� � �� � ������� � ��� � ���������������������������������� ������������������� ��� �� ����� ����������� ��������� ������ ���� ����� ��� ���������� ���� �������� ����������� ��� ��������������� 
������ ���� ��������� ���� ������ ����������� �������� ������� ���� ������� ������ ������ ��� �������� ������ ��� ��� �������� ���������� ����� ��� ����������� ����� �������� ��� �������� �������� ���������� ����� ��� ���������� ������ ���������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ��������������� ������� ��� ���������� ��� �������� ��� ���� ���������� ��� ������ ��� ��������� ��� ����� ����� ���������������� ������� ��� ������� ��� ���� ������������ ��������� ��������� ��� �� ����������� ��������� ��� ���������� �������� ��� �������� ����� ����� ���������� ��� ��������� ��� ���� ���������� ��� ������ ���� ������ ������� ���� ����� ���������������������������������������������������������������������������������������������������� ���������� ������� ��� ��������� ������ ����� ����� ������ ��� �������������� �������� ������� ���������� ���� �������� ��� ����� ���� ��������������� ������ ���� ����������� ����� ����� ��������� ����������� ���� �������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������������� ����� ������ ��� ����� ��������� ����������� ����� ���� �������� ������ ����� ����� ���� ��������� ��������� ����� ��� �������������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������� ��� ��������� ��� ������������ ��������� ���� ��������������� ��� �� ��������� ����� ������ ����� ���� ���� ���� ����� ��� �������� ��������� ������������ ��������� ��� ��������� ���� ������������ ����������� �������� ��� ���� ���� ����������� ���� 
�������������������������������������������������������������������������������������������������������� ���������������������� �������������������������������������������������������������������������������������������������������� �������� ��� ��� ���������� ��� ���� ������� ��������� �������� ���������� ������������� ��������������� ������������ ���� ������������ ����� �������� ��� ������������ �������� ��������������� �������� ������������� ��� ������� ������ ������� �������� ����������� ����� ����� ����������� ��� �������� ��� ���� ������ ���� ���������� ���� ��������������� ��������� ������������ ������������� ��������� ��������������� ������������ ����� ��� ���������� ����� ������� ��� ����������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������������� ���� ��� ������ ��� ������������ ��������� ��� ������ ������� ��������� �������� �������� ������� ����� ������ ���� ������������ �������� ��� ���� ���������� ��� ��������� ��� ��������� ���� ����� ����������� ������� �������� ��� ��� ��������������� ����������� ���� ��� ����������� ������ ����������� ����� ��� �������� ���� ��������� ������ ������ ������������������������������������������������������������������������������������������������������ ��������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ������������������������� � ��������������������������������������������������������������������������������� ������ ����� ��� ������� ���� ����� ������ �������� ��� ���� ����������� ��������� ���� �������������� ��� ���� �������������������������������������������������������������������������������������������������� ���� ��� ��������� ��� ������ �� ���� ��� ���� ���� ��� ���� ��������� ��� ��� ��������� 
���������� ���� ������������ �������� ���� ����� ����� ��������������� ������ ������ ���� �������� ����� �������� ���� ��� ������ ���� �������� ��������� ��� �� ���������� ���������� ���� ������� ��������� ��� �� ������ ���������� ��� ��������� ����� �� ���������� ��� ��������� ��������� ���� �������� ������� ����� ��� ������� ���� ��������� ��� ��������� ��� ������� ��� ���������������������������������������������������������������������������������������� ������������������������� ������������������������������������������ ���������������������������������������������������������������� ��������������������������������������������������������������������������� ��������������������������������� ������� ������ ���� ��� ����� ��� �������� ���������������� ���� ����� �������� ���������� ������ �� ���������� ���� ������������ ��� �� ������� ���� ���� ���� ��������� ��� ������� ��� ��� ������� ��� �� ��������� ���� ���� �� ����������� �������������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������������������� ��� ���������� ��� ���������� ��������� ��� ����������� ������ � � ��� ���� ����� ������� � � �� ����� ���������� �������� ��� ��������������� � ������������������������������������������������������������������������������������������������ ���������������������� �������������� � �������������������������������������������������������������� ����� ���������� ��� �� ������������� ������ ����� ���� ������ ������������ ��������� ������� ������ ���� ������������ ��� �� ��������������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������� ������������������������������������������������������������������ ���� �������������� �� � ��� � �� � � ��� ���� � ��� �� � � ������ � ��� � �� � 
���������� � ��� � �� � ���� ����������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������� � ������������������������� � �������� ����������� �������� ���� ���� ���������� ���� ��������� ����� �� ������ ����� ��� �� ����� ����� ���� ���������� ���� ������������ ����� ��� ���� ����������� �������� ���� ������ ����������� ���� ���������� ���� �������������� ��� ��� ��������������������������������������������������������������������������������������������������������� ���� ������ ��������������� ��� ���� ���� ���� ��������� ��� ���� ��� ������������� ���� ��� �������� �� ���� ��� ��� ��������������������������������������������������������������������������������������������������������� ������������������������������������������������������������ � ������������������� � �� �������������������������� �� �� ������������ ��� � � � �� ����������� ��� ���� ��� � � � �������������� ����� ����� ��� �������������������������� � � ���� � �� � � ��� ����� � �� � �������� �� �� � ������������ � �� �� ����</figDesc><table><row><cell>������������������������������������ ����������������������������������������������������������������������������������������������� ������������������������������������������� ���� ����������� ��� ����������� ���� ������������� ���� ���������� ������� ����� ����������� �������� ���� �������� �� �������� ����� ��� ����������� ��� �������� �������� ������ ���������� ������������� ��������� ���� �������������� ������� ���� ����� ��������� ��� ������������ ������ ���� ������� ������ ��������� ������ �������� ��������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� ������������ ����� ������ ���������� �� ���������� ���� ���������� ��������� �������� ����������� ��� �������������� ����� 
����� ������������ �������� ������� ���� ��������� ���� ������� ��� ����������������� ���� ��������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������ ������� �� ������� ������ ��� ������������� ��� ������ ��� ��������� ��������� ������������� ������� ��� ���� ��������� ������������� ��� ����� �������� ���� ����� ���� ��� �������� ��� ��������� ���������� ��� ������������ ���� ������������ ��� ���� ���������� ��� ����������� ��� ��������� ���� �������� ���� �������� ����� ������ ���� ��� ����������������������������������������������������������������������������� ������������������������������������������������������������������������������������������� ���������������� ��������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������������� ������������ ���� ����������������� ��� ���� ����������� ������������� ������ ����������� ����� ��� ������ ������ ������������������������������������������������������������������������������������������������������ ������ ��� �������� ��������� ���� ������� ������� ����������������� �������� ����� �������� ��������� ���� ����� ����������� ��� �������� ��� ��������� ���� ��������� ��� ��������� ���������� ������ ������������� ���������� ��� ���������� ���������� ���� �������� ��������� ����������� ������ ������� ��������� ��������� ���� ��������� ���� ����������������������������������������������������������������������������������������������������������� ����������������������������������������� ��� �������� ��� ���������� ���� ���������� ��� ��������� �������� � � ����� ��������� ������ ����� ��������� ����� ������� ��������� ������ ���� ��������� ������������� ������ ���� ���� �������� ��� ����������� ���� ������������ ��� 
������������������������������������������������������������������������������������������������������������� ��� ������������� ��� ���� ������������ ��� ���� ���������������� ��������� ����� ������ ���������� ������ ����������� �������� ���� ���������� ��� ����� ���������� ��� ���� ����� ����� �������������� ��� ���� ���������� ���� ��������������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ������� ��� �� ������������ ��� ������������ ���� �������������� ������������ ����� ��� ������� ����� ��� ��������� ����������� �������� ������ ��� ����� �������� �������� ��� ���� ���������� ������ ���� ���� ������������ ���������� ������������������������������ ������������������������������ � � �������������������������������������������� �� ��������������������������� � ��������������������������������������������������������������������������������������������������������������� � ��������������� � � � ������� ������ � � ������ � ���������� � � � ������ ������������������������������������������������������������������������������������������������������������� 
��������������������������������������������������������������������������������������������������������������� ������������������������ �������������������������������������� ���������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������� ���� ����� ������� ��� ���� ��� ������� ��� ������������ �������� ��� ��������� ����������� �������������� ������� ���� ������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� ���� ������������ ������� �������������� ��������� ��������� ���� ����� ����� ��� ������� ���� ���� ��������� ������������������������������������������������������������������������������������������������ ������������������������������������������������������������������������������������������������������������ ���� ������� ������ ����������� ������ ������� ����� ��� �� ���������� ��� ���� ������ ��� ��� ������ ��� ��� ������� ������ ������������������������������������������������������������������������������������������������������������ ���������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������� ��� ���� ����� �������� ������� ������������ �������������� ����� ����� ����� ��� ������������ ������������ ��� ���� ������������������������������������������������������������������������������������������������������ ���������� ������ ������ ������������������������������� ������ 
�������������������������������������� ��������������������������������������������� ������������������������������������������������������� ����������������������������������������������������������������������������������������������������� ���������� ���� �������������� ������������ �������� ������ ������ ���� ������������� ������������ ���� ������������������������������ ������������������������������ � � ����������������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������ � ������������������������������������������������������������������������������������������������������������������ ��������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ��������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������� ��������� ��������������� ������� ������������������������������������������������������������������������������������������������������������ �������������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ������������������������������������������������������������������������������������������������������������ ������������������������������������������������������������������������������������������� ������ ���������� ��������������������������������������������������������������������������������������������������������� 
�������������������������������������������������������������������������������������������������������� � ����������������������������������������������� ����������������������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������� ������������������������������ ������������������������������ � � ������������������������������������������������������������������������������������������������������������������������ � ����������������������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������������� ������������������������������ ������������������������������ � � ������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ���������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������� � � ������������������������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������������������������������� ������������������������������ ������������������������������ � ����������������������������������������������������������������������� ��������������������������������� ��� ��������� ������ ������ ������ ������ �� �� �� �� �� ����� ���� ��������� ��������� ������ ������ ���������� ��� �������� ������ ������ ������ ������ �� �� �� �� �� ����� �� ��������� ��������� ������ ������ 
������������ ��������������� � ������������ ��� ������ ���� ����������� ��������� ������ ����� ��� �� �������� ��� �������� ��� ���������� ��� ������ ��� �� ���� ��� ������������ �� �� �� ���� �� �� �� �� �� ����� �� ��������� ��������� ������� �������� ����������� ��� �������� ������ ������ ������ ������ �� �� �� �� �� ���� ������ ��������� ��������� ������ ������ ������������ ��� �� �� ��� �� ����� �� ������������ ��� ��� ������ �������� ���� ���� ��������� ��������� ������������������������������������������������������������������������������������������������������������� �� ������������ ��� �� �� ��� �� �� �� �� �� ���� �� ��������� ��������� ������� ����� ��������� �� �������������� ��� ��� ��� ���� �� �� �� �� �� ���� ������ ��������� ��������� ������� ����� ����������� �� ���������� �� �� ��� ���� �� �� �� �� �� ���� �� ��������� ��������� ������� ����� �������� �� ������������ ��� ��� ��� ������� �� �� �� �� �� ���� ���� ��������� ��������� �������������� ������ �� ������ ��� ��� ��� ������ �� �� �� �� �� ���� ���� ��������� ��������� �������������� ������ ��� ������������ �� �� �� ���� �� �� �� �� �� ����� ������ ��������� ��������� ������� ����� ����������� ��� ������������ �� �� �� ���� �� �� �� �� �� ����� ������ ��������� ��������� ������� �������� ����������� ��� ������������ �� �� �� ���� �� �� �� �� �� ����� �� ��������� ��������� ������� ����� ����������� ��� ������������ �� �� �� ���� �� �� �� �� �� ����� �� ��������� ��������� ������� ����� ����������� ��������������������������������������������������� � ��������������������������������������������������������� ��������� � �������������������������������������������������������� � � ������������������������������������� �������������������������������������������� �� ��������������������������������������� � � ����������������������� �� ����������������������������������������������������� � � �������������� ������� ����� ������������������������ 
����������������������������������������������������������������������������������������������������������� ������������ ��� ������ �� �������� ������ ��� �������������� ��� ��������� �� ������ ��� ������������� ��� �������� ��� ��������� ��� ���� ������������ ��� ���� ��������� ������� ��� ��� ���� ������� �� ����� �������� ���������� ������������� �������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������� �� ������������ �� �� �� �� �� �� �� �� �� ���� �� �������� ��� ��� ��� ����� �� �� �� �� �� ���� �� ��������� ��������� �������������� ��������� �� ������������ �� �� �� �� �� �� �� �� �� ���� ������ ��������� ��������� ������� ����� ����������� �� ������� ��� � ���� �� ������� � ���� ����� ��������� ��������� �������������� ��������� ������������������������������������������������������������������������������������������������������������� ����������� �������� ������� ������ ������� ������� ���� ����� ����� ����� ������� ��� ����� ��������� �� ��������� �� ��������� �� �� �� �������� ��� �� �� ���� �� �� �� �� �� ��� ������ ��������� ��������� �������������� ���������� ������ � �� ������� ��� � ����� ���� ������ ������������������������������������������������������������������������������������������������������������� ����������������������������������������������������������������������������������� ��������� �������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� �������������������������������� ��������������������������������� � � ������������������������������������� � ������������������������������������ �������������������������������������������������������������������������������������������������������������� � � �� 
��������������������������������� ���������� � �� ���� � ������������ � � � � ��� �� ����� � ���������� � � � ���� � ������������������������������������������������������������������������ ������������������������������� � �������������������������������������������������������������������������� ����������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������������� ��� ������������ �������� ����������������������������������� ������������������������ ������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������� �� �������������������������������������������������������������� ���������������������������������������������������� ����������������������������������� ���� ������������ �������� ��������� ��������� ������ ������ ���� �������� ������� ������� ���� �������� ��������� ������������ ��� ��������� ��������� ��������� ���������� ��� ������������ �������������� �������� ����������� ��� �������������������������������������������������� � ������������������������������������������������������� �������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� � � �� ��������������������������������������������������� �������� � �� ������������������������ � � ��� ���������������������� � ���������������������������������� � � � � ���������������������������������������������������� � ���� ���� ��������������������������������������� ������ � �� ��� ��� � � �� ���������������������� � � ���������������������� ��� � � ��������������������������������������������������������������������������������������������������� � � ��������� ��� ������ � � 
�� � � ��� ������ ��� ���� ���������� ��� � � � ��� ������������ � � � � ���� ���� ��� ������� ��� ��� ������� ��� ����������������������������������������������������������������� ��������������������������������� � ��� � ������ � ���������������������������������� � ���� � ����������������������������� ����������������������������������������������������������������������������������������������������������� ����������������������� � � ��� ����� ��������� ��� ���������� �������������� ������ ��� ���� ������������� ��� ���������������� �������� ��������� ���� ��� ������ ���� ����� ������� ����� ��� ���� ������ ��� ��� ����� ������� �������� ������� ����� ������ ������ ��������������������������������������������������������������������������������������������������������� �������������������������� � ������������������������������������������������������������������������������� �� ��������������������������� ������������������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������������� ���������� ������� ������ ��� ���� ������������ ������ ���� ���� �������� ���������� ��� ����� ����������� ����� �� ��� ���� ����� ������ ����� ������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������� � ���� � ���� � ����� �������������������� � ������������</cell></row></table><note>� � ������</note></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_4"><head></head><label></label><figDesc>������������ �������� ����������� ����� ��������� �������� ������ ��� ��������� ���� ���� ���� ���� ��������� ���� ����������� ���� ������� ����� ����������� ����� ������� ����� ��� ����������� ���� ���������� ������ ��� ��������� ��� ����������� ��������� ��������� ���� ���� ��������� ��������� ��� ���� ����� ���� ���������� �������� ���� ���������������������������������������������������������������������������������������������������������������� ���� ����������� ��� ����� ����� ������� ������������ ���� �������� ���� ����������� ������������ ����� ������� ��������� ����������� ������ ������ ���� ������� ����������� ��� ������ ��� ��������� �������� ��� �� �� ����������� ����������� ��������� ����������� ����������� ���� ������� ���� ��� ��� ������� ��� ����� ����� ����� ����������� �� ��������� ������ ���� ����� ��������� ����� �������� �������� ��� ������������ �� ���� ��������� ��� ���� ���������� �� ������� �������� ���� ��� ����� ������� ���� ��������� ��� ������ ����� ��� �������� ������� �������� ���� ��������� ���� ���������������������������������������������������������������������������������������������������������� ����� � �� � ������� � �� � ������� � �� � ������� � �� � �� ����� ��� ��� � ���������� ����� � ��� � ������� � �� � ������� � �� � ������� � �� � �� ����� ��� ��� � ������������ ����� � ��� � �� ����� ���� ��� � ����������� ����� �� ��� �� �� ������������ ����� � �� � ������� � �� � ��� ����� ��� ��� � ������������ ����� � �� � ������� � �� � ������� � �� � ������� � �� � ������� � �� � ������� �� �� �� �� ����� ��� ��� � ���������� ����� � �� � ������� � �� � ������� � �� � ���������� �� �� ������� �� �� �� �� ����� ���� ��� � ������������ ����� �� �� �� ������� �� �� �� �� ����� ���� ��� � ������������ ����� �� �� �� ������� �� �� �� �� ���������������������������������� ���� �������� ��� ����������� ������� ����������� ����� 
����������� ���� ������������ �������������� ���� ����� ��� ����������� ������������ ���� ����� ��� ��������� ��� �������� ���������� ��� ���� ���������� ���� ��������� �������� ��� ������������ ������������� �������� ����� ���������� ���� ���� ��������� ������������ ���� ��������� ���� ������� ��� ������� �������� ���� ������� ������� �������� ��� ������� ���� �������� ������������� �������� ���� ���� ���� ���� ����������� �������� �� ������������ �������� ������ ���� ���������� ���� ���� ������ ������������ �� �������� ����� ��� ��� �������� ���� ��� ����� ��������� ����� ������� ��� ������� ���� ������ �������� ������������� ����� ��������� ��������� ��� ������� �������� ���� ��������� ������������ ������� ��� �������� ���� ������� ��� �� ������������������������ ���������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ��������� �������� ����������� ������ ���� ������ ��� ����� ��������������� �� ������ ����������� �������������� �� ����� ��� ����������� ������������ ����� �� ����������� ��� ���� ������������ ���������� ���� ��������� ��� �� �������� ����������������������������������������������������������������������������������������������������������������� ����� ��� ������ ��� ���� ������� ����������� ����� �� ����������� ���������� ���� ������� ��������������� ���������� ��� ����� ������� ���������� ��� ������������ �������������� �������� ��������� ��� ������ ������� ��� ������ ����������� ����� ��� �������� ������ ����� ��� �� ������� ������� ���� ��� ����������� ��� ������������ �� ��������� ���� �������� ���� ��������������������������������������������������������������������������������������������������������� ����� ����� ���� ����� ������ ���� �� ������� ���� ��� �� ����� ��� �� ������� ����� ���������� ��� 
���������� ������� ���� ��������� ��������� ���� �������� ����������� ������ ����� ��� ��� ��������� ����� �������� ������� ��� ������� �������� ���������������������������������������������������������������������������������������������������������</figDesc><table><row><cell>���� ������������ ��������������� �������� ����� ��������� ������ ��� ��� ������� ���� �������� ���������������������������������������������������������������������������������������������������������� ��� ���� ���� ����� ��� �������� ����� ���� ��������������� ��� ���� ��������� �������� ��� �������� ���� ������ ��� ����� ����������������������������������������������������������������������������������������������������������� ���� ������������ �������� �������������������������������������������������������������������� �������������� ������������������������������ ������������������������������ � �� � ����������� ����� ��� ���������� ��� ���� ������������ ���� ���������� ����������� ��� ��������� �������� �������������� ��� ���� ������������������� ���� ����� ��� ���� ������� ��� ������� ��� ������������� ���� ���� ��������� ��� ���� ������ ��� ���������� �������� ������������������������������������������� � ����������������������������������������������������������������������������������������������� ���������������������� ���������������������� ��� �� �������� ��������� ���� ������ �� ��������������� ����� ��� ��� ����� ���������� ����������� ��� ����������� ������ ������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������� ���� ����������� ��������� ��� ���� �������� ����� �������� ������ ��� ��������� ���� ������� ������� ����������� 
�������������������������������������������������������������������������������������������������������������� �������������������������������������������������������� �� ����������������������������������������������� ���������������������������������������������������������������������������������������������������������������� ����� ������ ���� ��������������������������������������� � ������������������������������ ������������������������������ � �� ������������������������������������������������������������� ����� ��� �� � ������������ �������������������������������������������������������������� ����� ��� �� � ���������� �������������������������������������������������������������� ����������������������������������������������������������������������������������� �� ���� ��������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������ ������������������������������������������������������������������������������������������������ ��� ������ �� �� �������� ����� ���� �� � ������������ �������������������������������������� �� ����� �� ����� ��� �� �� ������������ ���������������������������������������� �� ����� ��� ���� ������������������������������������������������������������������������������������������������������������ �������������� ����������������������������������������������������������������� �� �� ���������� ���������������������������������������� �� ����� �� ����� ������������������������������������������������������������������������������������������������������������� �� �� ���������� ������������������������������������� �� ����� ��� �� �� ������������ �������������������������������������� �� ����� ��� ������������������������������������������ ��������������� ���� ������ ���� ������ ���� ����� ���� ���� ���� �� ��������� ������������ �������� ���������� ��������� ��������� ����� �� �� 
������������ ���������������������������������������������������� ��������������������������������������������������������� �� � ������������� �������������������������������������� �� ����� ��� �� ����� ��� �� �� ����������� ���������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ����� ������� ������ ���� �������� ������� ���� ������� ��� �� ��������� ���� ����������� �������� ��������� ���� ������ �� � ������������ �������������������������������������� �� ����� ��� �� � ������������ ������������������������������������������������� ����� ��� �� � ������������ �������������������������������������������������� �� ����� �� �� � ���������� ������������������������������������������������� �� ����� �� �� � ���������� ������������������������������������� �� ����� ��� �� � ���������� ������������������������������������� �� ����� ��� �� � ������������ �������������������������������������� �� ����� ���� �� ����� ��� ���������������������������������������������������������������������������������������������������������� ����������������������������������������������������������� �� �� ����������� ���������������������������������������� � ���������������������������������������������������������������������������� �� ����� ���� ����� �������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������������� ����������� ��������� ���� ������������������ ������� ������ ������������ ��� ���� ����������� ��� ��� ����� ��������� ���� ������ ������ ����������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������� 
���������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� ����� �������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������� ���������� ����� ������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� �� �� ����������� ������������������������ � ���������������������������������������������������������������������������� � � �� ����� ���� ���������� ����� ���� ��� � ����������� ����� � �� � �� ����� ���� ��� � ��������������������������������������� � ������������������������������������������������������������������������������������������������������� � ��������������������������������������������������������������������������������������������� ��� ������ �� �� �������� ����� ��� �� � ������������ �������������������������������� �� ����� ��� �� � ���������� ��������������������������� ����� �� �� � ������������ ��������������������������������������������������������������� �� ����� ��� �� � ����������� ��������������������������������������������������������������� ����� ��� �� � ������������ ���������������������������������������������������������������� �� ����� ��� �� � ���������� ���������������������������������������������������������������� ����� ��� �� � ������������ �������������������������������������������������������������� �� ����� ��� ��������������������������������� ����������������������������������������������������������������������������������������������������� �� � ����������� 
�������������������������������������������������������������� ����� ���� ������������������������������������������������������ ���������������� ����������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ����������������� ����� ������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������� ����� ����������������������������������������������������������������������������������������������������������� ���������������� ����� ���������������������������������������������������������������������������������������������������������� ���������������� ����� ���������� ���� ���������� ���� ��������� ���������� ��� ���� �������� ������������ ������� ��� ������������ ��� ���� ���� ������������������������������������������������������������������������������� ������ �������������������������������������������������������������������������������������������������������������� ������������������������������������������������������ ��� � ������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������������� ������������������������������������������������������������������������������������������������������������ ����������������������������������������������������������������������������������������������������������� 
������������������������������������������������������������������������������������������������������������ ������������� �������� �� ������������ �� ������������� ���� �� ������������ ��� ���� ������� ��� ���� �������� ��� ���� ���� ������������������������������������������������������������������������������������������������������������ ����� �� ��������� �������� ���� �������� ������������ ��� ������ ��������� ����������� ����������� ������ ���� ����� ��������������������������������������������������������� ������������������� ������������������� � � ������ � ������ ������ � ����������� ������ � � ������������ ����� �������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������� ����� ������������������������������������������������������������������������������������������������������������ ������������������� ����� ���������� ���� �������� ���� ���������� ���� ������������ ���� ��� ������������� ��� ���� ������� ���� �������� ��� ����� ���� ���������������������������������������������������������������������� ����� ������� ���� ������� ���� �������� �� ��������� ������ ���������� ���� ����� ������ ������������ ��� ���� �������������� ����������� ��� ������� ��������� ��� ������������ ��������� ���������� ������� ���� ������� ���� ������ ���� ����� ��������������� ����� ������������������������������������������������������������������������������������������������������������ ����������������������������������������� ����� ����������������������������������������������������������������������������������������������������������� ����� ���������� ���� �� ������ ������������ ������ ���� ���� ��������� ����� ��� ������ ������ ���� ���� ��������� ����� ��������������������������������������������������������� ����� ���������� ���� �� ������ ������������ ������� ��� ������������ ��� ���� ����� ��������� ����������� 
��� ����������� ����������������������������������������������� ����� ���������� ���� ���������� ������� ������ ������������ �������� �������� ��� ����������� ������������� ���������� ���� ���� ����������������� �������� �������� ������ ���������������������������������������������������������������������������������������������������� ����� ������� ���� ���������� ���� ���������������� ���������� ��� ��������� ������ ��� ��������� ������� ��� ������������� ���� �� �������������������������������������������������������������������������������� ���� ����������� ��� ��������� ������ ���������� ����� ���������� � ���� �������� ������ ����� ��� ������ �������� ����� �������������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������� ������������������������������������������������ � ������������ � �������������� � �� ������ � ����� �������� ������ ���������� ���� ������������ ��� ������ ���� ���� ��������� ����� ���� ����������������� ��� ������������ ������ ���������������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������������������������������������������� ����� ���������������������������������������������������������������������������������������������������������������� ����������� ����� �������� ����� ������� ���� ����� �������� ���������� �������� ��������� ������ ���� ������������ ���� ��������� ������� ��������������� ���� ���� ���������� ���� ����� ����� ��������� ���� �������� ��������� ��� ����������� ����� ��� ������������ ��� ������� �� ���� ���� ����� ���� ������������ �������� ���� ������� ���� ������ ����������� ���� ���������� ������������������������������� ���� ��������������������������������������������������������������������������������������������������������������� ����� 
������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������������� ��������������������������������������������������������������������������������������������������������������� �������������������������������������������������������������������������������������������������������������� ���� ������� ���� ������ ���� ����������� ���� ������ ���� ��������� ��������������� ��������� ����� ���������� ���� �������� ���� ����� ����������� ����� ��� �� ������������ ��� ����� ����� ��� ������� ��� ���� ����������� ��������� ����� �������������������������������������������������������������������������������� �������� �������� ���� ����������� �� ������� ��� ��������� ������ ����� �������� ������������� ��� ����������� ������ ���� ���������� ���� ��������� ���� ������������ ��� �������� ���� ��������� ������� ��� �������������� ���������� �������� ������������ ��������� ��� ���� �������������� ������� ���� ���� ����� ��������� ��� ������������ ������������ ������� ���� ������������������������������������������������������������������������������������������������������������ ������������������������������������������������������������������������������� �������������������������������������������������������������������������� ����� �������������������������������������������������������������������������������������������������������� ���� ����������������������������������������������������������������������������������������������������������� ���������������������������������������������������������������������� ��������������������������������������������������������������</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_5"><head></head><label></label><figDesc>containing assertions on concept C. Output: induced concept definition C. Compute hypotheses C ′ ⊒ H1, . . . , Hn based on refinement operators for ALC logic Let h1, . . . , hn be features of the probabilistic Noisy-OR classifier, apply the EM algorithm For all hi Compute score ep∈Ep covers(ep, hi, B) Let h ′ the hypothesis with the best score According to h ′ add H ′ to C Until score({h1, . . . , hi}, λi, E) &gt; score({h1, . . . , hi+1}, λi+1, E) Fig. 2. Complete learning algorithm.</figDesc><table><row><cell>Repeat</cell></row><row><cell>Initialize C ′ = ⊥</cell></row></table></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">http://www.mindswap.org</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_1">http://protegewiki.stanford.edu/index.php/Protege_Ontology_Library</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_2">A possible way for determining the wi is to assign a high value if the corresponding feature concept reflects high information content, low value otherwise (see[10]  for more details).</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_3">Precisely, the body of evidence must contain consonant focal sets, i.e. when the set of focal elements is a nested family[2].</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_4">It is well known that a typed first-order logic can be re-expressed via a syntactic transformation as an untyped logic (cf.,[7]).</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_5">Gaifman's domain of interpretation included the constants of the original language as well as the added constants; the original constants are then self-interpreted. This requires that no two constants be equal, an assumption we do not make.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_6">Open World Assumption maintains that an object that cannot be proved to belong to a certain concept is not necessarily a counterexample for that concept[14].</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_7">A similar assumption is adopted in the nFOIL algorithm<ref type="bibr" target="#b97">[26]</ref>.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_8">http://clarkparsia.com/pellet/.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_9">http://lattes.cnpq.br.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="5" xml:id="foot_10">Indexes 0, 1 . . . n represent individuals from a given domain.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="6" xml:id="foot_11">Similarity was carried out by applying a "LIKE" database operator on titles.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_12">http://www.w3.org/2001/sw/webOnt</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Acknowledgments. Rommel Carvalho gratefully acknowledges full support from the Brazilian Office of the Comptroller General (CGU) for the research reported in this paper, and its employees involved in this research, especially Mário Vinícius Claussen Spinelli, the domain expert.</p><p>Acknowledgments. Georg Gottlob's work was supported by the EPSRC grant Number EP/E010865/1 "Schema Mappings and Automated Services for Data Integration." Georg Gottlob, whose work was partially carried out at the Oxford-Man Institute of Quantitative Finance, gratefully acknowledges support from the Royal Society as the holder of a Royal Society-Wolfson Research Merit Award. Thomas Lukasiewicz's work was supported by the German Research Foundation (DFG) under the Heisenberg Programme.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>Grateful acknowledgement is extended to anonymous reviewers who provided helpful comments on previous drafts of some material contained in the present paper.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgements</head><p>The first author is supported by CAPES. The second author is partially supported by CNPq. The work reported here has received substantial support through FAPESP grant 2008/03995-5.</p><p>Acknowledgement : this work was partly funded by the FP7 BISON (Bisociation Networks for Creative Information Discovery) project, number 211898.</p></div>
			</div>

			<div type="annex">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>An Algorithm for Learning with Probabilistic Description Logics</head><p>José Eduardo Ochoa Luna and Fabio Gagliardi Cozman</p><p>Escola Politécnica, Universidade de São Paulo, Av. Prof. Mello Morais 2231, São Paulo -SP, Brazil eduardo.ol@gmail.com, fgcozman@usp.br</p><p>Abstract. Probabilistic Description Logics are the basis of ontologies in the Semantic Web. Knowledge representation and reasoning for these logics have been extensively explored in the last years; less attention has been paid to techniques that learn ontologies from data. In this paper we report on algorithms that learn probabilistic concepts and roles. We present an initial effort towards semi-automated learning using probabilistic methods. We combine ILP (Inductive Logic Programming) methods and a probabilistic classifier algorithm (search for candidate hypotheses is conducted by a Noisy-OR classifier). Preliminary results on a real world dataset are presented.</p><p>its states) we have one child A ′ j that has assigned a conditional probability distribution P (A ′ j |A j ). Variables A ′ j , j = 1, . . . , k are parents of probability of the class variable C. P M (C|A ′ ) represents a deterministic function f that assigns to each combination of values (a ′ 1 , . . . , a ′ k ) a class c. A generic ICI classifier is illustrated in Figure <ref type="figure">1</ref> .</p><p>The probability distribution of this model is given by <ref type="bibr">[12]</ref>:</p><p>where the conditional probability P M (c|a ′ ) is one if c = f (a ′ ) and zero otherwise.</p><p>The Noisy-OR model is an ICI model where f is the OR function:</p><p>The joint probability distribution of the Noisy-OR model is</p><p>It follows that</p><p>Using a threshold 0 ≤ t ≤ 1 all data vectors a = (a 1 . . . , a k ) such that</p><p>The Noisy-OR classifier has the following semantics. If an attribute A j is in a state a j then the instance (a 1 , . . . , a j , . . . 
, a k ) is classified as C = 1 unless there is an inhibitory effect, with probability P M (A ′ j = 0|A j = a j ). All inhibitory effects are assumed to be independent. Therefore the probability that an instance does not belong to class C (C = 0), is a product of all inhibitory effects j P M (A ′ j = 0|A j = a j ). For learning this classifier the EM-algorithm has been proposed <ref type="bibr">[12]</ref>. The algorithm is directly applicable to any ICI model; in fact, an efficient implementation resort to a transformation of an ICI model using a hidden variable (further details in <ref type="bibr">[12]</ref>). We now shortly review the EM-algorithm tailored to Noisy-OR combination functions.</p><p>Every iteration of the EM-algorithm consists of two steps: the expectation step (E-step) and maximization step (M-step). In a transformed decomposable model the E-step corresponds to computing the expected marginal count n(A ′ l , A l ) given data D = {e 1 , . . . , e n } (e i = {c i , a i } = {c i , a i 1 , . . . , a i k }) and model M:</p><p>where for each (a ′ l , a l )</p><p>Assume a Noisy-Or classifier P M and an evidence C = c, A = a. The updated probabilities (the E-step) of A ′ l for l = 1, . . . , k can be computed as follows <ref type="bibr">[12]</ref>:</p><p>where z is a normalization constant. The maximization step corresponds to setting</p><p>, for all l = 1 . . . , k.</p><p>Given the Noisy-OR classifier, the complete learning algorithm is described in Figure <ref type="figure">2</ref>, where λ denotes the maximum likelihood parameters. We have used the refinement operators introduced in <ref type="bibr">[3]</ref> and the Pellet reasoner 3 for instance checking. It may happen that during learning a given example for a candidate hypothesis H i cannot be proved to belong to the target concept. This is not necessarily a counterexample for that concept. In this case, we can make use of</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Position Papers</head><p>BeliefOWL: An Evidential Representation in OWL Ontology Amira Essaid 1 and Boutheina Ben Yaghlane 2</p><p>1 LARODEC Laboratory, Institut Supérieur de Gestion de Tunis essaid amira@yahoo.fr 2 LARODEC Laboratory, Institut des Hautes Etudes Commerciales de Carthage boutheina.yaghlane@ihec.rnu.tn</p><p>Abstract. The OWL is a language for representing ontologies but it is unable to capture the uncertainty about the concepts for a domain.</p><p>To address the problem of representing uncertainty, we propose in this paper, the theoretical aspects of our tool BeliefOWL which is based on evidential approach. It focuses on translating an ontology into a directed evidential network by applying a set of structural translation rules. Once the network is constructed, belief masses will be assigned to the different nodes in order to propagate uncertainties later.</p><p>Author Index</p></div>			</div>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<monogr>
		<title level="m" type="main">Semantic web for the working ontologist modeling in RDF, RDFS and OWL</title>
		<author>
			<persName><forename type="first">D</forename><surname>Allemang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Hendler</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2008">2008</date>
			<publisher>Elsevier</publisher>
			<pubPlace>United States</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">MEBN logic: A Key Enabler for Network Centric Warfare</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Takikawa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pool</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Fung</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">J</forename><surname>Wright</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 10th International Command and Control Research and Technology Symposium (10th ICCRTS)</title>
				<meeting>the 10th International Command and Control Research and Technology Symposium (10th ICCRTS)</meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Virginia</forename><surname>Mclean</surname></persName>
		</author>
		<imprint>
			<pubPlace>USA</pubPlace>
		</imprint>
	</monogr>
	<note>CCRP publications</note>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Hypothesis Management in Situation-Specific Network Construction</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">M</forename><surname>Mahoney</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Wright</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Uncertainty in Artificial Intelligence: Proceedings of the Seventeenth Conference</title>
				<meeting><address><addrLine>San Mateo, CA</addrLine></address></meeting>
		<imprint>
			<publisher>Morgan Kaufman</publisher>
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Probabilistic Ontologies for Knowledge Fusion</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Janssen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 11th International Conference on Information Fusion</title>
				<meeting>the 11th International Conference on Information Fusion</meeting>
		<imprint>
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Of Klingons and Starships: Bayesian Logic for the 23rd Century</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Uncertainty in Artificial Intelligence: Proceedings of the Twenty-first Conference (UAI 2005)</title>
				<meeting><address><addrLine>Edinburgh, Scotland</addrLine></address></meeting>
		<imprint>
			<publisher>AUAI Press</publisher>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">MEBN: A language for first-order Bayesian knowledge bases</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">172</biblScope>
			<biblScope unit="page" from="140" to="178" />
			<date type="published" when="2008-02">2008. February 2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Constructing Situation Specific Belief Networks</title>
		<author>
			<persName><forename type="first">S</forename><surname>Mahoney</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 14th Annual Conference on Uncertainty in Artificial Intelligence (UAI-98)</title>
				<meeting>the 14th Annual Conference on Uncertainty in Artificial Intelligence (UAI-98)<address><addrLine>San Francisco, CA</addrLine></address></meeting>
		<imprint>
			<publisher>Morgan Kaufmann</publisher>
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">On Case-Based Knowledge Sharing in Semantic Web</title>
		<author>
			<persName><forename type="first">H</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Wu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Tools with Artificial Intelligence</title>
				<meeting><address><addrLine>Los Alamitos, CA, USA</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society</publisher>
			<date type="published" when="2003">2003</date>
			<biblScope unit="volume">0</biblScope>
			<biblScope unit="page">200</biblScope>
		</imprint>
	</monogr>
	<note>IEEE International Conference on</note>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">KB-Grid: Enabling Knowledge Sharing on the Semantic Web</title>
		<author>
			<persName><forename type="first">H</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Xu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Challenges of Large Applications in Distributed Environments, International Workshop on</title>
				<meeting><address><addrLine>Los Alamitos, CA, USA</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society</publisher>
			<date type="published" when="2003">2003</date>
			<biblScope unit="volume">0</biblScope>
			<biblScope unit="page">70</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">A Multi-Disciplinary Approach to High Level Fusion in Predictive Situational Awareness</title>
		<author>
			<persName><forename type="first">Paulo</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">C</forename><surname>Chang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 12th International Conference on Information Fusion</title>
				<meeting>the 12th International Conference on Information Fusion<address><addrLine>Seattle, WA, USA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Applying semantic web technologies to knowledge sharing in aerospace engineering</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">S</forename><surname>Dadzie</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Bhagdev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Chakravarthy</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Chapman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Iria</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Lanfranchi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Magalhães</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Petrelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Ciravegna</surname></persName>
		</author>
		<idno type="DOI">10.1007/s10845-008-0141-1</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Intelligent Manufacturing</title>
		<imprint>
			<biblScope unit="volume">20</biblScope>
			<biblScope unit="issue">5</biblScope>
			<biblScope unit="page" from="611" to="623" />
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Semantic Web for Knowledge Sharing</title>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">J</forename><surname>Kings</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Davies</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-540-88845-1_8</idno>
		<ptr target="http://dx.doi.org/10.1007/978-3-540-88845-1_8" />
	</analytic>
	<monogr>
		<title level="m">Semantic Knowledge Management</title>
				<imprint>
			<date type="published" when="2009">2009</date>
			<biblScope unit="page" from="103" to="111" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<monogr>
		<author>
			<persName><forename type="first">G</forename><forename type="middle">V</forename><surname>Veres</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">D</forename><surname>Huynh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">S</forename><surname>Nixon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">R</forename><surname>Smart</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">R</forename><surname>Shadbolt</surname></persName>
		</author>
		<ptr target="http://eprints.ecs.soton.ac.uk/14278/" />
		<title level="m">The Military Knowledge Information Fusion Via Semantic Web Technologies</title>
				<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<monogr>
		<title level="m" type="main">Bayesian Semantics for the Semantic Web</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2005">2005</date>
			<biblScope unit="volume">315</biblScope>
			<pubPlace>Fairfax, VA, USA</pubPlace>
		</imprint>
		<respStmt>
			<orgName>PhD Diss. Department of Systems Engineering and Operations Research, George Mason University</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">PR-OWL: A Bayesian Ontology Language for the Semantic Web</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">J</forename><surname>Laskey</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the ISWC Workshop on Uncertainty Reasoning for the Semantic Web</title>
				<meeting>the ISWC Workshop on Uncertainty Reasoning for the Semantic Web<address><addrLine>Galway, Ireland</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">A GUI Tool for Plausible Reasoning in the Semantic Web using MEBN</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ladeira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">L</forename><surname>Santos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C</forename><surname>Costa</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Seventh international Conference on intelligent Systems Design and Applications</title>
				<meeting>the Seventh international Conference on intelligent Systems Design and Applications<address><addrLine>Washington, DC, USA</addrLine></address></meeting>
		<imprint>
			<publisher>ISDA. IEEE Computer Society</publisher>
			<date type="published" when="2007">2007</date>
			<biblScope unit="page" from="381" to="386" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">UnBBayes-MEBN: Comments on Implementing a Probabilistic Ontology Tool</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ladeira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">L</forename><surname>Santos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Matsumoto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the IADIS International Conference on Applied Computing</title>
				<meeting>the IADIS International Conference on Applied Computing</meeting>
		<imprint>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="211" to="218" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">A First-Order Bayesian Tool for Probabilistic Ontologies</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ladeira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">L</forename><surname>Santos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Matsumoto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Twenty-First International Florida Artificial Intelligence Research Society Conference</title>
				<meeting>the Twenty-First International Florida Artificial Intelligence Research Society Conference<address><addrLine>Menlo Park, California, USA</addrLine></address></meeting>
		<imprint>
			<publisher>The AAAI Press</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="631" to="636" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">A GUI Tool for Plausible Reasoning in the Semantic Web Using MEBN</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ladeira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">L</forename><surname>Santos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Matsumoto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-540-88045-5_2</idno>
	</analytic>
	<monogr>
		<title level="m">Book Innovative Applications in Data Mining</title>
				<meeting><address><addrLine>Berlin; Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2009">2009</date>
			<biblScope unit="page" from="17" to="45" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<monogr>
		<title level="m" type="main">The Description Logic Handbook</title>
		<author>
			<persName><forename type="first">F</forename><surname>Baader</surname></persName>
		</author>
		<editor>D. Calvanese, D. L. McGuinness, D. Nardi, and P. F. Patel-Schneider</editor>
		<imprint>
			<date type="published" when="2003">2003</date>
			<publisher>Cambridge University Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">The Semantic Web</title>
		<author>
			<persName><forename type="first">T</forename><surname>Berners-Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Hendler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Lassila</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Sci. Am</title>
		<imprint>
			<biblScope unit="volume">284</biblScope>
			<biblScope unit="page" from="34" to="43" />
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">The anatomy of a large-scale hypertextual web search engine</title>
		<author>
			<persName><forename type="first">S</forename><surname>Brin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Page</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computer Networks</title>
		<imprint>
			<biblScope unit="volume">30</biblScope>
			<biblScope unit="issue">1-7</biblScope>
			<biblScope unit="page" from="107" to="117" />
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">P-TAG: Large scale automatic generation of personalized annotation TAGs for the Web</title>
		<author>
			<persName><forename type="first">P.-A</forename><surname>Chirita</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Costache</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Nejdl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Handschuh</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. WWW</title>
				<meeting>WWW</meeting>
		<imprint>
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Query answering and ontology population: An inductive approach</title>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. ESWC</title>
				<meeting>ESWC</meeting>
		<imprint>
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Search on the Semantic Web</title>
		<author>
			<persName><forename type="first">L</forename><surname>Ding</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">W</forename><surname>Finin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Joshi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Peng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Pan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Reddivari</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Computer</title>
		<imprint>
			<biblScope unit="volume">38</biblScope>
			<biblScope unit="issue">10</biblScope>
			<biblScope unit="page" from="62" to="69" />
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">Induction of classifiers through non-parametric methods for approximate classification and retrieval with ontologies</title>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">International Journal of Semantic Computing</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="403" to="423" />
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<monogr>
		<title level="m" type="main">From Web search to Semantic Web search</title>
		<author>
			<persName><forename type="first">B</forename><surname>Fazzinga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Gianforme</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Gottlob</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Lukasiewicz</surname></persName>
		</author>
		<idno>INFSYS RR-1843-08-11</idno>
		<ptr target="http://www.kr.tuwien.ac.at/research/reports/rr0811.pdf" />
		<imprint>
			<date type="published" when="2008-11">November 2008</date>
		</imprint>
		<respStmt>
			<orgName>Institut für Informationssysteme, TU Wien</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">Technical Report</note>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">Semantic search</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">V</forename><surname>Guha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Mccool</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Miller</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. WWW</title>
				<meeting>WWW</meeting>
		<imprint>
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">Linking data to ontologies</title>
		<author>
			<persName><forename type="first">A</forename><surname>Poggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Lembo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Calvanese</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>De Giacomo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Lenzerini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Rosati</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">J. Data Semantics</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<biblScope unit="page" from="133" to="173" />
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">Similarity Search -The Metric Space Approach</title>
		<author>
			<persName><forename type="first">P</forename><surname>Zezula</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Dohnal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Batko</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Advances in Database Systems</title>
		<imprint>
			<biblScope unit="volume">32</biblScope>
			<date type="published" when="2006">2006</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<monogr>
		<author>
			<persName><forename type="first">K</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kokar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Martin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Lukasiewicz</surname></persName>
		</author>
		<ptr target="http://www.w3.org/2005/Incubator/urw3/XGR-urw3-20080331/" />
		<title level="m">Uncertainty Reasoning for the World Wide Web</title>
				<imprint>
			<publisher>W3C Incubator Group</publisher>
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<monogr>
		<title level="m" type="main">Uncertainty and Information</title>
		<author>
			<persName><forename type="first">G</forename><surname>Klir</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2006">2006</date>
			<publisher>Wiley</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<monogr>
		<title level="m" type="main">A Mathematical Theory of Evidence</title>
		<author>
			<persName><forename type="first">G</forename><surname>Shafer</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1976">1976</date>
			<publisher>Princeton University Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">Approximate measures of semantic dissimilarity under uncertainty</title>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Uncertainty Reasoning for the Semantic Web I</title>
				<editor>
			<persName><forename type="first">P</forename><surname>Da Costa</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="volume">5327</biblScope>
			<biblScope unit="page" from="355" to="372" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<analytic>
		<title level="a" type="main">Using the Dempster-Shafer theory of evidence to resolve ABox inconsistencies</title>
		<author>
			<persName><forename type="first">A</forename><surname>Nikolov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Uren</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Motta</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>De Roeck</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Uncertainty Reasoning for the Semantic Web I</title>
				<editor>
			<persName><forename type="first">P</forename><surname>Da Costa</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="volume">5327</biblScope>
			<biblScope unit="page" from="143" to="160" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">A k-nearest neighbor classification rule based on Dempster-Shafer theory</title>
		<author>
			<persName><forename type="first">T</forename><surname>Denoeux</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Systems, Man and Cybernetics</title>
		<imprint>
			<biblScope unit="volume">25</biblScope>
			<biblScope unit="page" from="804" to="813" />
			<date type="published" when="1995">1995</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b37">
	<analytic>
		<title level="a" type="main">On the Dempster-Shafer framework and new combination rules</title>
		<author>
			<persName><forename type="first">R</forename><surname>Yager</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Information Sciences</title>
		<imprint>
			<biblScope unit="volume">41</biblScope>
			<biblScope unit="page" from="93" to="137" />
			<date type="published" when="1987">1987</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<monogr>
		<title level="m">The Description Logic Handbook</title>
				<editor>
			<persName><forename type="first">F</forename><surname>Baader</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">D</forename><surname>Calvanese</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">D</forename><surname>Mcguinness</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">D</forename><surname>Nardi</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><surname>Patel-Schneider</surname></persName>
		</editor>
		<imprint>
			<publisher>Cambridge University Press</publisher>
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b39">
	<analytic>
		<title level="a" type="main">Analogical reasoning in description logics</title>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Uncertainty Reasoning for the Semantic Web I</title>
				<editor>
			<persName><forename type="first">P</forename><surname>Da Costa</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="volume">5327</biblScope>
			<biblScope unit="page" from="336" to="354" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b40">
	<analytic>
		<title level="a" type="main">Query answering and ontology population: An inductive approach</title>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 5th European Semantic Web Conference, ESWC2008. Volume 5021 of LNCS</title>
				<editor>
			<persName><forename type="first">S</forename><surname>Bechhofer</surname></persName>
		</editor>
		<meeting>the 5th European Semantic Web Conference, ESWC2008. Volume 5021 of LNCS</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="288" to="302" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b41">
	<analytic>
		<title level="a" type="main">Metric-based stochastic conceptual clustering for ontologies</title>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Information Systems</title>
		<imprint>
			<biblScope unit="volume">34</biblScope>
			<biblScope unit="page" from="725" to="739" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b42">
	<monogr>
		<title level="m" type="main">The Elements of Statistical Learning: Data Mining, Inference, and Prediction</title>
		<author>
			<persName><forename type="first">T</forename><surname>Hastie</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Tibshirani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Friedman</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2001">2001</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b43">
	<analytic>
		<title level="a" type="main">Similarity in context</title>
		<author>
			<persName><forename type="first">R</forename><surname>Goldstone</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Medin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Halberstadt</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Memory and Cognition</title>
		<imprint>
			<biblScope unit="volume">25</biblScope>
			<biblScope unit="page" from="237" to="255" />
			<date type="published" when="1997">1997</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b44">
	<analytic>
		<title level="a" type="main">Towards measuring similarity in description logics</title>
		<author>
			<persName><forename type="first">A</forename><surname>Borgida</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Walsh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Hirsh</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Working Notes of the International Description Logics Workshop</title>
				<editor>
			<persName><forename type="first">I</forename><surname>Horrocks</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">U</forename><surname>Sattler</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">F</forename><surname>Wolter</surname></persName>
		</editor>
		<meeting><address><addrLine>Edinburgh, UK</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
			<biblScope unit="volume">147</biblScope>
		</imprint>
	</monogr>
	<note>CEUR Workshop Proceedings.</note>
</biblStruct>

<biblStruct xml:id="b45">
	<analytic>
		<title level="a" type="main">On the influence of description logics ontologies on conceptual similarity</title>
		<author>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Staab</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 16th Knowledge Engineering Conference, EKAW2008</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Gangemi</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><surname>Euzenat</surname></persName>
		</editor>
		<meeting>the 16th Knowledge Engineering Conference, EKAW2008</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="volume">5268</biblScope>
			<biblScope unit="page" from="48" to="63" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b46">
	<analytic>
		<title level="a" type="main">The Instance Store: DL reasoning with large numbers of individuals</title>
		<author>
			<persName><forename type="first">I</forename><surname>Horrocks</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Turi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Bechhofer</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2004 Description Logic Workshop</title>
		<title level="s">CEUR Workshop Proceedings.</title>
		<editor>
			<persName><forename type="first">V</forename><surname>Haarslev</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">R</forename><surname>Möller</surname></persName>
		</editor>
		<meeting>the 2004 Description Logic Workshop</meeting>
		<imprint>
			<date type="published" when="2004">2004. 2004</date>
			<biblScope unit="volume">104</biblScope>
			<biblScope unit="page" from="31" to="40" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b47">
	<analytic>
		<title level="a" type="main">Infinite hidden semantic models for learning with OWL DL</title>
		<author>
			<persName><forename type="first">A</forename><surname>Rettinger</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Nickles</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 1st ESWC Workshop on Inductive Reasoning and Machine Learning for the Semantic Web, IRMLeS09</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<editor>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</editor>
		<meeting>1st ESWC Workshop on Inductive Reasoning and Machine Learning for the Semantic Web, IRMLeS09<address><addrLine>Heraklion, Greece</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2009">2009</date>
			<biblScope unit="volume">474</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b48">
	<analytic>
		<title level="a" type="main">Materializing and querying learned knowledge</title>
		<author>
			<persName><forename type="first">V</forename><surname>Tresp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Bundschus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rettinger</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 1st ESWC Workshop on Inductive Reasoning and Machine Learning for the Semantic Web, IRMLeS09</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<editor>
			<persName><forename type="first">C</forename><surname>Amato</surname></persName>
		</editor>
		<meeting>1st ESWC Workshop on Inductive Reasoning and Machine Learning for the Semantic Web, IRMLeS09<address><addrLine>Heraklion, Greece</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2009">2009</date>
			<biblScope unit="volume">474</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b49">
	<analytic>
		<title level="a" type="main">Completing description logic knowledge bases using formal concept analysis</title>
		<author>
			<persName><forename type="first">F</forename><surname>Baader</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Ganter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Sertkaya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">U</forename><surname>Sattler</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 20th International Joint Conference on Artificial Intelligence</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Veloso</surname></persName>
		</editor>
		<meeting>the 20th International Joint Conference on Artificial Intelligence<address><addrLine>Hyderabad, India</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2007">2007</date>
			<biblScope unit="page" from="230" to="235" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b50">
	<analytic>
		<title level="a" type="main">Modal logic interpretation of Dempster-Shafer theory: An infinite case</title>
		<author>
			<persName><forename type="first">D</forename><surname>Harmanec</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Klir</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Wang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">International Journal of Approximate Reasoning</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="page" from="81" to="93" />
			<date type="published" when="1996">1996</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b51">
	<analytic>
		<title level="a" type="main">A possibilistic extension of description logics</title>
		<author>
			<persName><forename type="first">G</forename><surname>Qi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Pan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Q</forename><surname>Ji</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Working Notes of the 20th International Description Logics Workshop</title>
				<editor>
			<persName><forename type="first">D</forename><surname>Calvanese</surname></persName>
		</editor>
		<meeting><address><addrLine>Bressanone, Italy</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2007">DL2007. 2007</date>
			<biblScope unit="volume">250</biblScope>
			<biblScope unit="page" from="435" to="442" />
		</imprint>
	</monogr>
	<note>CEUR Workshop Proceedings.</note>
</biblStruct>

<biblStruct xml:id="b52">
	<monogr>
		<author>
			<persName><forename type="first">H</forename><surname>Jeffreys</surname></persName>
		</author>
		<title level="m">Theory of Probability</title>
				<imprint>
			<publisher>Oxford University Press</publisher>
			<date type="published" when="1961">1961</date>
		</imprint>
	</monogr>
	<note>3rd ed.</note>
</biblStruct>

<biblStruct xml:id="b53">
	<monogr>
		<title level="m" type="main">The Foundations of Statistics</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">J</forename><surname>Savage</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1954">1954</date>
			<publisher>Wiley</publisher>
			<pubPlace>New York</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b54">
	<analytic>
		<title level="a" type="main">Uncertainty Representation and Reasoning in the Semantic Web</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">C G</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Lukasiewicz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Semantic Web Engineering in the Knowledge Society, Idea Information Science</title>
				<imprint>
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b55">
	<monogr>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">J</forename><surname>Good</surname></persName>
		</author>
		<title level="m">Probability and the Weighing of Evidence</title>
				<meeting><address><addrLine>London</addrLine></address></meeting>
		<imprint>
			<publisher>Charles Griffin and Co</publisher>
			<date type="published" when="1950">1950</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b56">
	<analytic>
		<title level="a" type="main">Probabilistic Reasoning in a Classical Logic</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">S</forename><surname>Ng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">W</forename><surname>Lloyd</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Applied Logic</title>
		<imprint>
			<biblScope unit="volume">7</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="218" to="238" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b57">
	<analytic>
		<title level="a" type="main">The Definition of Random Sequences</title>
		<author>
			<persName><forename type="first">P</forename><surname>Martin-Löf</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Information and Control</title>
		<imprint>
			<biblScope unit="volume">9</biblScope>
			<biblScope unit="page" from="602" to="619" />
			<date type="published" when="1966">1966</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b58">
	<monogr>
		<title level="m" type="main">An Introduction to Kolmogorov Complexity and Its Applications</title>
		<author>
			<persName><forename type="first">M</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Vitányi</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1997">1997</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
	<note>2nd ed.</note>
</biblStruct>

<biblStruct xml:id="b59">
	<analytic>
		<title level="a" type="main">Calibration-Based Empirical Probability</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">P</forename><surname>Dawid</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">The Annals of Statistics</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page" from="1251" to="1274" />
			<date type="published" when="1985">1985</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b60">
	<monogr>
		<title level="m" type="main">A Mathematical Introduction to Logic</title>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">B</forename><surname>Enderton</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2001">2001</date>
			<publisher>Harcourt Academic Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b61">
	<analytic>
		<title level="a" type="main">An Analysis of First-Order Logics of Probability</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">Y</forename><surname>Halpern</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">46</biblScope>
			<biblScope unit="page" from="311" to="350" />
			<date type="published" when="1991">1991</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b62">
	<analytic>
		<title level="a" type="main">Probabilities over Rich Languages</title>
		<author>
			<persName><forename type="first">H</forename><surname>Gaifman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Snir</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Symbolic Logic</title>
		<imprint>
			<biblScope unit="volume">47</biblScope>
			<biblScope unit="page" from="495" to="548" />
			<date type="published" when="1982">1982</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b63">
	<monogr>
		<title level="m" type="main">Representing and Reasoning with Probabilistic Knowledge: A Logical Approach to Probabilities</title>
		<author>
			<persName><forename type="first">F</forename><surname>Bacchus</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1990">1990</date>
			<publisher>MIT Press</publisher>
			<pubPlace>Boston, MA</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b64">
	<analytic>
		<title level="a" type="main">Mathematical Logic</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">R</forename><surname>Shoenfield</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Association for Symbolic Logic</title>
				<imprint>
			<date type="published" when="1967">1967</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b65">
	<monogr>
		<title level="m" type="main">A Decision Method for Elementary Algebra and Geometry</title>
		<author>
			<persName><forename type="first">A</forename><surname>Tarski</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1951">1951</date>
			<publisher>Univ. of California Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b66">
	<analytic>
		<title level="a" type="main">Concerning Measures in First-Order Calculi</title>
		<author>
			<persName><forename type="first">H</forename><surname>Gaifman</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Israel Journal of Mathematics</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="page" from="1" to="18" />
			<date type="published" when="1964">1964</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b67">
	<monogr>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">J</forename><surname>Good</surname></persName>
		</author>
		<title level="m">Good Thinking: The Foundations of Probability and Its Applications</title>
				<imprint>
			<publisher>University of Minnesota Press</publisher>
			<date type="published" when="1983">1983</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b68">
	<monogr>
		<title level="m" type="main">Comparative Statistical Inference</title>
		<author>
			<persName><forename type="first">V</forename><surname>Barnett</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1999">1999</date>
			<publisher>Wiley</publisher>
			<pubPlace>New York</pubPlace>
		</imprint>
	</monogr>
	<note>3rd ed.</note>
</biblStruct>

<biblStruct xml:id="b69">
	<monogr>
		<title level="m" type="main">Theory of Probability: A Critical Introductory Treatment</title>
		<author>
			<persName><forename type="first">Bruno</forename><surname>De Finetti</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1974">1974</date>
			<publisher>Wiley</publisher>
			<pubPlace>New York</pubPlace>
		</imprint>
	</monogr>
	<note>originally published in 1934</note>
</biblStruct>

<biblStruct xml:id="b70">
	<analytic>
		<title level="a" type="main">MEBN: A Language for First-Order Bayesian Knowledge Bases</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">172</biblScope>
			<biblScope unit="issue">2-3</biblScope>
			<biblScope unit="page" from="140" to="178" />
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b71">
	<analytic>
		<title level="a" type="main">Modeling Uncertainty for Plausible Reasoning in the Semantic Web</title>
		<author>
			<persName><forename type="first">R</forename><surname>Carvalho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Costa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ladeira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Santos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Matsumoto</surname></persName>
		</author>
		<author>
			<persName><surname>Unbbayes</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Semantic Web</title>
				<editor>
			<persName><forename type="first">V</forename><surname>Kordic</surname></persName>
		</editor>
		<imprint>
			<publisher>ISBN</publisher>
			<biblScope unit="page" from="978" to="953" />
		</imprint>
	</monogr>
	<note>in press) References</note>
</biblStruct>

<biblStruct xml:id="b72">
	<analytic>
		<title level="a" type="main">Foundations of onto-relational learning</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">A</forename><surname>Lisi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ILP &apos;08: Proceedings of the 18th International Conference on Inductive Logic Programming</title>
				<meeting><address><addrLine>Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="158" to="175" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b73">
	<analytic>
		<title level="a" type="main">An algorithm based on counterfactuals for concept learning in the semantic web</title>
		<author>
			<persName><forename type="first">L</forename><surname>Iannone</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Palmisano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Applied Intelligence</title>
		<imprint>
			<biblScope unit="volume">26</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="139" to="159" />
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b74">
	<analytic>
		<title level="a" type="main">A refinement operator based learning algorithm for the ALC description logic</title>
		<author>
			<persName><forename type="first">J</forename><surname>Lehmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Hitzler</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ILP &apos;08: Proceedings of the 17th International Conference on Inductive Logic Programming</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">H</forename><surname>Blockeel</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">W</forename><surname>Shavlik</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><surname>Tadepalli</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="volume">4894</biblScope>
			<biblScope unit="page" from="147" to="160" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b75">
	<monogr>
		<title level="m" type="main">Inductive Logic Programming</title>
		<author>
			<persName><forename type="first">S</forename><surname>Muggleton</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1992">1992</date>
			<publisher>McGraw-Hill</publisher>
			<pubPlace>New York</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b76">
	<analytic>
		<title level="a" type="main">Loopy propagation in a probabilistic description logic</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">G</forename><surname>Cozman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">B</forename><surname>Polastro</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">SUM &apos;08: Proceedings of the 2nd International Conference on Scalable Uncertainty Management</title>
				<meeting><address><addrLine>Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="120" to="133" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b77">
	<monogr>
		<title level="m" type="main">Introduction to Statistical Relational Learning (Adaptive Computation and Machine Learning</title>
		<author>
			<persName><forename type="first">L</forename><surname>Getoor</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Taskar</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2007">2007</date>
			<publisher>The MIT Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b78">
	<analytic>
		<title level="a" type="main">Assembling a consistent set of sentences in relational probabilistic logic with stochastic independence</title>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">P</forename><surname>De Campos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">G</forename><surname>Cozman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">E</forename><surname>Ochoa-Luna</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Applied Logic</title>
		<imprint>
			<biblScope unit="volume">7</biblScope>
			<biblScope unit="page" from="137" to="154" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b79">
	<monogr>
		<title level="m" type="main">Sentential Probability Logic</title>
		<author>
			<persName><forename type="first">T</forename><surname>Hailperin</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1996">1996</date>
			<publisher>Lehigh University Press</publisher>
			<pubPlace>Bethlehem, United States</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b80">
	<analytic>
		<title level="a" type="main">Complexity analysis and variational inference for interpretation-based probabilistic description logics</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">G</forename><surname>Cozman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Polastro</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Conference on Uncertainty in Artificial Intelligence</title>
				<imprint>
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b81">
	<analytic>
		<title level="a" type="main">Learning logical definitions from relations</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">R</forename><surname>Quinlan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Mostow</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Machine Learning</title>
				<imprint>
			<date type="published" when="1990">1990</date>
			<biblScope unit="page" from="239" to="266" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b82">
	<monogr>
		<title level="m" type="main">Probabilistic Reasoning in Intelligent Systems: Networks of Plausible Inference</title>
		<author>
			<persName><forename type="first">J</forename><surname>Pearl</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1988">1988</date>
			<publisher>Morgan Kaufmann</publisher>
			<pubPlace>San Mateo</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b83">
	<analytic>
		<title level="a" type="main">Noisy-OR classifier: Research articles</title>
		<author>
			<persName><forename type="first">J</forename><surname>Vomlel</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Int. J. Intell. Syst</title>
		<imprint>
			<biblScope unit="volume">21</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="381" to="398" />
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b84">
	<analytic>
		<title level="a" type="main">Basic description logics</title>
		<author>
			<persName><forename type="first">F</forename><surname>Baader</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Nutt</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Description Logic Handbook</title>
				<imprint>
			<publisher>Cambridge University Press</publisher>
			<date type="published" when="2002">2002</date>
			<biblScope unit="page" from="47" to="100" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b85">
	<analytic>
		<title level="a" type="main">DL-FOIL concept learning in description logics</title>
		<author>
			<persName><forename type="first">N</forename><surname>Fanizzi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>D'Amato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ILP &apos;08: Proceedings of the 18th International Conference on Inductive Logic Programming</title>
				<meeting><address><addrLine>Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="107" to="121" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b86">
	<monogr>
		<title level="m" type="main">Machine Learning</title>
		<author>
			<persName><forename type="first">T</forename><surname>Mitchell</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1997">1997</date>
			<publisher>McGraw-Hill</publisher>
			<pubPlace>New York</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b87">
	<analytic>
		<title level="a" type="main">Probabilistic inductive logic programming</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">D</forename><surname>Raedt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Kersting</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Probabilistic ILP -LNAI 4911</title>
				<meeting><address><addrLine>Berlin</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="1" to="27" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b88">
	<analytic>
		<title level="a" type="main">Inductive logic programming: Theory and methods</title>
		<author>
			<persName><forename type="first">S</forename><surname>Muggleton</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">D</forename><surname>Raedt</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Logic Programming</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<biblScope unit="issue">20</biblScope>
			<biblScope unit="page" from="629" to="679" />
			<date type="published" when="1994">1994</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b89">
	<analytic>
		<title level="a" type="main">Learnability of description logic programs</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">U</forename><surname>Kietz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Inductive Logic Programming</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2002">2002</date>
			<biblScope unit="page" from="117" to="132" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b90">
	<analytic>
		<title level="a" type="main">Towards learning in CARIN-ALN</title>
		<author>
			<persName><forename type="first">C</forename><surname>Rouveirol</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Ventos</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ILP &apos;00: Proceedings of the 10th International Conference on Inductive Logic Programming</title>
				<meeting><address><addrLine>London, UK</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2000">2000</date>
			<biblScope unit="page" from="191" to="208" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b91">
	<analytic>
		<title level="a" type="main">AL-log: integrating Datalog and description logics</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Donini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Lenzerini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Nardi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Schaerf</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Intelligent and Cooperative Information Systems</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<biblScope unit="page" from="227" to="252" />
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b92">
	<analytic>
		<title level="a" type="main">DL+log: Tight integration of description logics and disjunctive datalog</title>
		<author>
			<persName><forename type="first">R</forename><surname>Rosati</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">KR</title>
		<imprint>
			<biblScope unit="page" from="68" to="78" />
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b93">
	<analytic>
		<title level="a" type="main">Learning the CLASSIC description logic: Theoretical and experimental results</title>
		<author>
			<persName><forename type="first">W</forename><surname>Cohen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Hirsh</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">KR94): Principles of Knowledge Representation and Reasoning: Proceedings of the Fourth International Conference</title>
				<imprint>
			<publisher>Morgan Kaufmann</publisher>
			<date type="published" when="1994">1994</date>
			<biblScope unit="page" from="121" to="133" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b94">
	<analytic>
		<title level="a" type="main">A refinement operator for description logics</title>
		<author>
			<persName><forename type="first">L</forename><surname>Badea</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">H</forename><surname>Nienhuys-Cheng</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ILP &apos;00: Proceedings of the 10th International Conference on Inductive Logic Programming</title>
				<meeting><address><addrLine>London, UK</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2000">2000</date>
			<biblScope unit="page" from="40" to="59" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b95">
	<analytic>
		<title level="a" type="main">Hybrid learning of ontology classes</title>
		<author>
			<persName><forename type="first">J</forename><surname>Lehmann</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 5th International Conference on Machine Learning and Data Mining</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<meeting>the 5th International Conference on Machine Learning and Data Mining</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2007">2007</date>
			<biblScope unit="volume">4571</biblScope>
			<biblScope unit="page" from="883" to="898" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b96">
	<analytic>
		<title level="a" type="main">Top-down induction of logic programs from incomplete samples</title>
		<author>
			<persName><forename type="first">N</forename><surname>Inuzuka</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kamo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Ishii</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Seki</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Itoh</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ILP &apos;96 : 6th International Workshop</title>
				<imprint>
			<date type="published" when="1997">1997</date>
			<biblScope unit="volume">1314</biblScope>
			<biblScope unit="page" from="265" to="284" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b97">
	<analytic>
		<title level="a" type="main">Integrating Naïve Bayes and FOIL</title>
		<author>
			<persName><forename type="first">N</forename><surname>Landwehr</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Kersting</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>De Raedt</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">J. Mach. Learn. Res</title>
		<imprint>
			<biblScope unit="volume">8</biblScope>
			<biblScope unit="page" from="481" to="507" />
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b98">
	<monogr>
		<title level="m" type="main">Uncertainty representation and reasoning in directed evidential networks</title>
		<author>
			<persName><forename type="first">B</forename><surname>Ben Yaghlane</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2002">2002</date>
		</imprint>
		<respStmt>
			<orgName>Institut Supérieur de Gestion de Tunis, Tunisia</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">PhD thesis</note>
</biblStruct>

<biblStruct xml:id="b99">
	<monogr>
		<title level="m" type="main">BayesOWL: A Probabilistic Framework for Semantic Web</title>
		<author>
			<persName><forename type="first">Z</forename><surname>Ding</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2005">2005</date>
			<pubPlace>Baltimore County</pubPlace>
		</imprint>
		<respStmt>
			<orgName>University of Maryland</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">PhD thesis</note>
</biblStruct>

<biblStruct xml:id="b100">
	<analytic>
		<title level="a" type="main">Representing probabilistic relations in RDF</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Fukushige</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">proc. of Work. on URSW at the 4th ISWC</title>
				<meeting>of Work. on URSW at the 4th ISWC<address><addrLine>Galway, Ireland</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b101">
	<analytic>
		<title level="a" type="main">Extending OWL by fuzzy description logic</title>
		<author>
			<persName><forename type="first">M</forename><surname>Gao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Liu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">proc. of the 17th IEEE ICTAI05</title>
				<meeting>of the 17th IEEE ICTAI05<address><addrLine>Hong Kong, China</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="562" to="567" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b102">
	<monogr>
		<title level="m" type="main">A Mathematical Theory of Evidence</title>
		<author>
			<persName><forename type="first">G</forename><surname>Shafer</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1976">1976</date>
			<publisher>Princeton University Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b103">
	<analytic>
		<title level="a" type="main">Fuzzy OWL: Uncertainty and the Semantic Web</title>
		<author>
			<persName><forename type="first">G</forename><surname>Stoilos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Stamou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Tzouvaras</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Pan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Horrocks</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">proc. of the Inter. Work. on OWL-ED05</title>
				<meeting>of the Inter. Work. on OWL-ED05<address><addrLine>Galway, Ireland</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b104">
	<analytic>
		<title level="a" type="main">OntoBayes: An Ontology-Driven Uncertainty Model</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Calmet</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">proc. of CIMSCA/IAWTIC</title>
				<meeting>of CIMSCA/IAWTIC<address><addrLine>Washington, DC, USA</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society</publisher>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="457" to="463" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b105">
	<analytic>
		<title level="a" type="main">Functional genomic hypothesis generation and experimentation by a robot scientist</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">D</forename><surname>King</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Nature</title>
		<imprint>
			<biblScope unit="issue">6971</biblScope>
			<biblScope unit="page" from="247" to="251" />
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b106">
	<monogr>
		<title level="m" type="main">The Act of Creation</title>
		<author>
			<persName><forename type="first">A</forename><surname>Koestler</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1964">1964</date>
			<publisher>Macmillan</publisher>
			<biblScope unit="page">751</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b107">
	<monogr>
		<author>
			<persName><forename type="first">D</forename><surname>Sherwood</surname></persName>
		</author>
		<ptr target="www.silverbulletmachine.com" />
		<title level="m">SilverBullet Machine: Guide to Creativity</title>
				<imprint>
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b108">
	<analytic>
		<title level="a" type="main">Koestler&apos;s Law: The Act of Discovering Creativity-And How to Apply It in Your Law Practice</title>
		<author>
			<persName><forename type="first">D</forename><surname>Sherwood</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Law Practice</title>
		<imprint>
			<biblScope unit="volume">32</biblScope>
			<biblScope unit="issue">8</biblScope>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b109">
	<monogr>
		<title level="m" type="main">Granular Assoc Rules for Multiple Taxonomies: A Mass Assignment Approach in Uncertain Reasoning in the Sem Web</title>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">P</forename><surname>Martin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Azvine</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Shen</surname></persName>
		</author>
		<editor>M.Nickles</editor>
		<imprint>
			<date type="published" when="2008">2008</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b110">
	<analytic>
		<title level="a" type="main">Semantic Web</title>
		<author>
			<persName><forename type="first">T</forename><surname>Berners-Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Hendler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Lassila</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Scientific American</title>
		<imprint>
			<biblScope unit="page" from="28" to="37" />
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b111">
	<monogr>
		<title level="m" type="main">Formal Concept Analysis: Mathematical Foundations</title>
		<author>
			<persName><forename type="first">B</forename><surname>Ganter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Wille</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1998">1998</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b112">
	<monogr>
		<title level="m" type="main">Formal Concept Analysis in Information Science</title>
		<author>
			<persName><forename type="first">U</forename><surname>Priss</surname></persName>
		</author>
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b113">
	<analytic>
		<title level="a" type="main">Extracting Taxonomies from Data -a Case Study using Fuzzy FCA</title>
		<author>
			<persName><forename type="first">A</forename><surname>Majidian</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">P</forename><surname>Martin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Web Intelligence-09</title>
				<meeting><address><addrLine>Milan, Italy</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computing</publisher>
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b114">
	<monogr>
		<title level="m" type="main">Majidian References</title>
		<author>
			<persName><forename type="first">T</forename><surname>Martin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Siyao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename></persName>
		</author>
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b115">
	<analytic>
		<title level="a" type="main">Uncertainty Reasoning for the World Wide Web</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">J</forename><surname>Laskey</surname></persName>
		</author>
		<ptr target="http://www.w3.org/2005/Incubator/urw3/XGR-urw3/" />
	</analytic>
	<monogr>
		<title level="m">W3C Incubator Group Report</title>
				<imprint>
			<date type="published" when="2008-03-31">31 March 2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b116">
	<monogr>
		<author>
			<persName><forename type="first">T</forename><surname>Berners-Lee</surname></persName>
		</author>
		<ptr target="http://www.w3.org/DesignIssues/LinkedData.html" />
		<title level="m">Linked Data</title>
				<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b117">
	<analytic>
		<title level="a" type="main">Media Meets Semantic Web --- How the BBC Uses DBpedia and Linked Data to Make Connections</title>
		<author>
			<persName><forename type="first">G</forename><surname>Kobilarov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 6th European Semantic Web Conference on the Semantic Web: Research and Applications</title>
		<title level="s">Lecture Notes In Computer Science</title>
		<meeting>of the 6th European Semantic Web Conference on the Semantic Web: Research and Applications<address><addrLine>Heraklion, Crete, Greece; Berlin, Heidelberg</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2009">2009</date>
			<biblScope unit="volume">5554</biblScope>
			<biblScope unit="page" from="723" to="737" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b118">
	<analytic>
		<title level="a" type="main">Duplicate Record Detection: A Survey</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">K</forename><surname>Elmagarmid</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">G</forename><surname>Ipeirotis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><forename type="middle">S</forename><surname>Verykios</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Knowledge and Data Engineering</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b119">
	<analytic>
		<title level="a" type="main">Named graphs, provenance and trust</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">J</forename><surname>Carroll</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Bizer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Hayes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Stickler</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 14th international Conference on World Wide Web</title>
				<meeting>the 14th international Conference on World Wide Web<address><addrLine>Chiba, Japan; New York, NY</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2005">May 10 -14, 2005. 2005</date>
			<biblScope unit="page" from="613" to="622" />
		</imprint>
	</monogr>
	<note>WWW &apos;05</note>
</biblStruct>

<biblStruct xml:id="b120">
	<monogr>
		<author>
			<persName><forename type="first">K</forename><surname>Alexander</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Cyganiak</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hausenblas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Zhao</surname></persName>
		</author>
		<ptr target="http://rdfs.org/ns/void-guide" />
		<title level="m">voiD Guide: Using the vocabulary of Interlinked Datasets</title>
				<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b121">
	<monogr>
		<title level="m" type="main">Uncertainty and Vagueness in Knowledge Based Systems</title>
		<author>
			<persName><forename type="first">R</forename><surname>Kruse</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Schwecke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Heinsohn</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1991">1991</date>
			<publisher>Boutheina</publisher>
			<biblScope unit="page">77</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b122">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Rommel</forename><forename type="middle">N</forename><surname>Carvalho</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">3</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b123">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Fabio</forename><surname>Cozman</surname></persName>
		</author>
		<author>
			<persName><surname>Gagliardi</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">63</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b124">
	<analytic>
		<title/>
		<author>
			<persName><forename type="first">Paulo</forename><forename type="middle">C G</forename><surname>Da Costa</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Claudia</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page">27</biblScope>
		</imprint>
	</monogr>
	<note>Amato</note>
</biblStruct>

<biblStruct xml:id="b125">
	<analytic>
		<title/>
		<author>
			<persName><surname>Esposito</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Floriana</title>
		<imprint>
			<biblScope unit="page">27</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b126">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Amira</forename><surname>Essaid</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">77</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b127">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Nicola</forename><surname>Fanizzi</surname></persName>
		</author>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<biblScope unit="page">27</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b128">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Bettina</forename><surname>Fazzinga</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">15</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b129">
	<analytic>
		<title/>
		<author>
			<persName><surname>Gajderowicz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Bart</title>
		<imprint>
			<biblScope unit="page">39</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b130">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Georg</forename><surname>Gottlob</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">15</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b131">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Marcelo</forename><surname>Ladeira</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">3</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b132">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Kathryn</forename><forename type="middle">B</forename><surname>Laskey</surname></persName>
		</author>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page">51</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b133">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Thomas</forename><surname>Lukasiewicz</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">15</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b134">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Andrei</forename><surname>Majidian</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">77</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b135">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Trevor</forename><surname>Martin</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">77</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b136">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Shou</forename><surname>Matsumoto</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">3</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b137">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Ochoa</forename><surname>Luna</surname></persName>
		</author>
		<author>
			<persName><forename type="first">;</forename><surname>José</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Eduardo</forename></persName>
		</author>
		<imprint>
			<biblScope unit="page">63</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b138">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Dave</forename><surname>Reynolds</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">81</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b139">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Alireza</forename><surname>Sadeghian</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">39</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b140">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Laécio</forename><forename type="middle">L</forename><surname>Santos</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">3</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b141">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">Zheng</forename><surname>Siyao</surname></persName>
		</author>
		<imprint>
			<biblScope unit="page">77</biblScope>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
