<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">AliQAn, Spanish QA System at CLEF-2005</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">S</forename><surname>Roger</surname></persName>
							<email>sroger@dlsi.ua.es</email>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Departamento de Computación</orgName>
								<orgName type="institution">University of Comahue</orgName>
								<address>
									<country key="AR">Argentina</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">S</forename><surname>Ferrández</surname></persName>
							<email>sferrandez@dlsi.ua.es</email>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">A</forename><surname>Ferrández</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">J</forename><surname>Peral</surname></persName>
							<email>jperal@dlsi.ua.es</email>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">F</forename><surname>Llopis</surname></persName>
							<email>llopis@dlsi.ua.es</email>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">A</forename><surname>Aguilar</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">D</forename><surname>Tomás</surname></persName>
							<email>dtomas@dlsi.ua.es</email>
							<affiliation key="aff0">
								<orgName type="department">Grupo de Investigación en Procesamiento del Lenguaje y Sistemas de Información Departamento de Lenguajes y Sistemas</orgName>
								<orgName type="institution">Informáticos</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Alicante</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">AliQAn, Spanish QA System at CLEF-2005</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">F6F3FA665263BDFDE7A10A58DB1FB516</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-25T00:37+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>H.3 [Information Storage and Retrieval]: H.3.3 Information Search and Retrieval Algorithms</term>
					<term>Measurement</term>
					<term>Performance</term>
					<term>Experimentation Question Answering for Spanish</term>
					<term>Syntactic Patterns</term>
					<term>Natural Language Processing</term>
					<term>Word Sense Disambiguation</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>This paper describes AliQAn, a monolingual open-domain Question Answering (QA) System developed in the Department of Language Processing and Information Systems at the University of Alicante for the CLEF-2005 Spanish monolingual QA evaluation task.</p><p>Our approach is based fundamentally on the use of syntactic pattern recognition in order to identify possible answers. Besides, Word Sense Disambiguation (WSD) is applied to improve the system. The results achieved (overall accuracy of 33.00%) are shown and discussed in the paper.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Question Answering (QA) is not a simple task of Information Retrieval (IR). A QA system must provide concise answers to questions stated by the user in natural language.</p><p>The research in open domain QA has mainly focused around English due to the advances in IR and Natural Language Processing (NLP). However, the Cross-Language Evaluation Forum Campaigns (CLEF) provide a multilingual forum for evaluation of QA systems in languages other than English. Multilingual open domain QA systems have been recognized as an important issue for the future of information search.</p><p>Nowadays, there are several types of implementations of Spanish QA systems. Generally, most of the systems are based on NLP tools <ref type="bibr" target="#b1">[2,</ref><ref type="bibr" target="#b4">5,</ref><ref type="bibr" target="#b6">7,</ref><ref type="bibr" target="#b7">8]</ref>, such as Part of Speech (PoS) taggers, syntactic parsers, etcetera. On the other hand, some other approaches use machine learning and statistical models <ref type="bibr" target="#b2">[3]</ref> like Hidden Markov Models in order to find the answer. Also, there are systems that combine NLP tools with statistical data redundancy techniques <ref type="bibr" target="#b8">[9,</ref><ref type="bibr" target="#b9">10]</ref>.</p><p>The systems based on NLP tools are complex because of the number of different NLP tools that they use. Moreover, a good integration between them is needed. Our system have been developed during the last two years in the Departament of Language Processing and Information Systems at the University of Alicante. It is based on complex pattern matching using NLP tools. 
Besides, Word Sense Disambiguation (WSD) is applied to improve the system.</p><p>As usual, in our approach, three tasks have been defined: question analysis, selection of relevant passages and extraction of the answer.</p><p>The rest of this paper is organized as follows: section two describes the structure and functionality of the system. Afterwards, the achieved results are shown and discussed in section three and finally, section four details our conclusions and future work.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">System description 2.1 Overview</head><p>In this section, the structure and functionality of our approach to open domain Spanish QA system are detailed. The next paragraph presents the phases of the QA process.</p><p>Our approach is based fundamentally on syntactic analysis of the questions and the Spanish documents (the EFE collection in this case), where the system tries to localize the answer. In order to make the syntactic analysis, SUPAR <ref type="bibr" target="#b3">[4]</ref> system is used, which works in the output of a PoS tagger <ref type="bibr" target="#b0">[1]</ref>. SUPAR performs partial syntactic analysis that lets us to identify the different grammatical structures of the sentence. Syntactic blocks (SB) are extracted, and they are our basic syntactic unit to define patterns.</p><p>Using the output of SUPAR we are going to identify three types of SB: verb phrase (VP), simple nominal phrase (NP) and simple prepositional phrase (PP). For example in the sentence: Hillary Clinton was in Jerusalen, the obtained list of SB is: [NP, hillary*clinton] [VP, to be] [PP, in: jerusalen].</p><p>The overall architecture of our system (Figure <ref type="figure" target="#fig_0">1</ref>) is divided in two main phases: Indexation phase and Search phase.</p><p>• Indexation phase. Indexation phase consists of arranging the data where the system tries to find the answer of the questions. This process is a main step to accelerate the process. Two different indexation are carried out: IR-n and QA indexation. The first one is carried out by IR-n system and it is independent from the second one, in which more syntactic and semantic information is stored. For example, the QA indexation stores the NP, VP and PP obtained from the parsing, and it also stores the results of the WSD process.</p><p>• Search phase. This phase follows the most commonly used schema. 
The three main modules of our approach are: These modules are described below. Previously, the used annotation is commented. The symbols "[ ]" delimit a SB (NP, VP and PP), "sp" is a preposition of a PP, the term "ap" indicates that PP is an apposition of the previous nominal head, SOL is the place where the answer can be found and the symbols "[. . .]" indicate some irrelevant SB for the search.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2">Question analysis</head><p>In this step the system carries out two tasks:</p><p>• To detect the type of information that the answer has to satisfy to be a candidate of answer (proper name, quantity, date . . .).</p><p>• To select the question terms (keywords) that make possible to locate those documents that can contain the answer.</p><p>We have based on WordNet Based-Types and EuroWordNet Top-Concepts in order to develop our taxonomy that consists of the next categories: person, group, object, place, place city, place capital, place country, abbreviation, event, numerical quantity, numerical economic, numerical age, numerical measure, numerical period, numerical percentage, temporary year, temporary month, temporary date and definition.</p><p>The expected answer type is achieved using a set of syntactic patterns. The question posed to the system is compared with all the patterns of all the categories. For each category a score is assigned that measures its probability of being the correct type. We choose the category having the highest probability.</p><p>We have 173 syntactic patterns for the determination of the different semantic category of our ontology. The system compares the SB of the patterns with the SB of the question, the result of the comparison determines the category of the question.</p><p>The next example shows the behavior of question analysis:</p><p>• Question:</p><p>-Quién es el Secretario General de la ONU? (Who is the General Secretary of the ONU?)</p><formula xml:id="formula_0">• Syntactic Block − [IP quién](who) − [VP ser ](to be) − [NP secretario general [PP, de: onu]] (General Secretary of the ONU)</formula><p>We have a pronoun or interrogative particle quién (who) followed by two syntactic blocks: a verb phrase and a nominal phrase. 
This example matches with the next pattern:</p><formula xml:id="formula_1">[IP, quién | quiénes] (who) [VP, ser ] (to be) [NP, hipónimo persona] (hyponim person)</formula><p>therefore, the category of the question is person. For each SB of the pattern, we keep a flag in order to determine whether the SB of the question is considered for the next stage of the QA process or not.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3">Selection of relevant passages</head><p>This second module of the QA process creates and retrieves passages using the IR-n system <ref type="bibr" target="#b5">[6]</ref>. The goal of the IR-n system is to extract a set of passages, where at least one passage contains the answer for the input question.</p><p>The inputs of IR-n are the detected keywords in question analysis; IR-n returns a list of passages where we apply the extraction of the answer process. Besides, the objective of this task is reducing the complexity of the process of searching the solution by means of reducing the amount of text in which the system searches for the answer.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.4">Extraction of the answer</head><p>The final step of QA is the extraction of the answer. In this module, the system takes the set of retrieved passages by IR-n and tries to extract a concise answer to the question.</p><p>Moreover, the type of question, SB of the question and a set of syntactic patterns with lexical, syntactic and semantic information are used in order to find a possible answer.</p><p>As shown in the next list, the system use the following NLP techniques.</p><p>• Lexical level. Grammatical category of answer must be checked according to the type of the question. For example, if we are searching for a person, the proposed SB as possible answer has to contain at least a noun.</p><p>• Syntactic level. Syntactic patterns have been defined. Those let us to look for the answer inside the recovered passages.</p><p>• Semantic level. Semantic restrictions must be checked. For example, if the type of the question is city the possible answer must contain a hyponym of city in EuroWordNet. Semantic restrictions are applied according to the type of the questions. Some types are not associated with semantic restrictions, such as quantity.</p><p>In order to design and group the patterns in several sets, the cases of the question are used. The patterns are classified in the followings three cases:</p><p>• Case 1. In the question, one SB of type NP or PP is only detected. For example:</p><p>-Question: Who is the president of Yugoslavia?</p><p>We only have a SB, the verb to be that is not used to find the answer because it is a copulative verb.</p><p>-SB: [NP, president [PP, of: Yugoslavia]]</p><p>• Case 2. A VP is detected in the question. This verb expresses an action that must be used in order to search the answer. For example:</p><p>-Question: Who did write Star Trek?</p><formula xml:id="formula_2">-SB: [VP, to write] [NP, star * trek]</formula><p>• Case 3. VP is preceded by a NP or PP. 
In this case we used three sections to find out the possible answer.</p><p>-Question: Which team did win the NBA tournament?</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>-SB: [NP, team] [VP, to win] [NP, NBA * tournament]</head><p>When the system tries to find a possible answer in a sentence, first, the SB of the question are localized in the text, secondly the system attempts to match the pattern in the sentence. If this has been possible, then a possible answer has been found that must be appraised using lexical and semantic restrictions according to the type of the question. The Spanish QA system has about 60 patterns; the number of patterns that is processed in each sentence depends on the type of the question. Therefore, a question of case 1 and type "person" processes different patterns than a question of case 1 and type "place city".</p><p>The next example shows the used pattern and the behavior of the extraction of the answer:</p><formula xml:id="formula_3">• [SOL[PP, sp: NP1]] [. . .] [VP][. . .] [NP2]</formula><p>First, NP2 (or PP2) and VP are searched by the system, afterward the NP1 with the answer must be found. The next example shows the process: </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.4.1">Value of the solution</head><p>In order to select the answer from a set of candidates, each possible answer is scored. The calculation of the value of the solution on each pattern is described in this subsection of the paper. The score of a candidate is structured in three phases: comparison of the terms inside a nominal head of a SB with the terms of the nominal head of another SB, comparison of a SB of the question with a SB of the text and weighting of a pattern according to the different SB.</p><p>Comparison of the terms of a nominal head. When the system is comparing two terms, the system does not only contemplate the literal value of terms, also checks the relations between these terms in EuroWordNet. So, weighting of terms is calculated using the equation 1, where N is the number of terms inside nominal head and pt i is the value of the terms that is calculated using EuroWordNet (1 same lemma, 0.8 synonym and 0.6 hyponim).</p><formula xml:id="formula_4">P t = N i=1 pt i N (1)</formula><p>The equation 2 shows the process of comparison between the terms "old Bosnian leader" and "senior Bosnian sailor" obtaining the following results:</p><formula xml:id="formula_5">P t = 0.8 + 1 + 0 3 = 0.6<label>(2)</label></formula><p>where old and senior have a synonym relation and both SB contain the lemma Bosnian. If the number of terms is different, the system divides using the great number.</p><p>Comparison of the SB. In our approach, the comparison of the SB occurs in two kinds of circumstances. When the SB of the question is localized in the text in order to apply a pattern and when the system is analizing a SB to find the answer.</p><p>The first type of comparison is called "value of terms", this measure can be affected by fixed circumstances, such as:</p><p>• Depth of appearance. The terms of the SB of the question may not appear as nominal heads in a SB of the text.</p><p>• Excess or missing of modifiers. 
If the nominal head of the question has more or less modifiers its value is penalized.</p><p>• Appearance of terms, but some complements are missing. When the system detects only the term of the question in the text, them it continues the searching until it is able to find the complements.</p><p>Second type of comparison of SB is the calculation of the value of solution, this value is calculated when it is searching for a possible answer. It takes into account a set of evaluation rules according to the type of the question, such as:</p><p>• Lexical restrictions. Grammatical category of the answer depends on the type of the question. For example, a question of type "persona (person)" the answer must have at least a proper noun or common noun.</p><p>• Semantic restrictions. The system leaks the answer according to semantic relations such as hyponimy. For example, a question of type "ciudad (city)" the answer must be a hyponim of "ciudad (city)" in EuroWordNet.</p><p>• Ad-hoc restrictions. An example of this kind of restrictions is founded in the questions of type "fecha (date)", when the system penalizes the value of solution if the answer does not contain day, month and year.</p><p>Comparison of the patterns. When the system is evaluating a pattern in the text, a set of circumstances are considerate in order to provide the value of solution. The total value of an answer is defined by the equation 3, where N is the number of retrieved SB of the question, vt i is the value of terms of each SB, d is the distance between the localized SB in the text and vs is the value of solution. As shown in the equation 3, vs is 30% of total and the remaining ones is the 70%.</p><formula xml:id="formula_6">V r = ( N i=1 vt i N − d * 0.1) * 0.7 + vs * 0.3<label>(3)</label></formula><p>Final evaluation of patterns. The system generates a list of candidate solutions, where each solution has been obtained in a passage. 
If two solutions have the same value for a question, the system chooses one considering the proposed order by IR-n. Spanish QA system must determine when a question has answer or not. In order to do that we suggest an umbral that indicates if an answer is solution or not. A question has answer if its V r is higher than 0.5.</p><p>Next, an example (question 114, In Workshop of Cross-Language Evaluation Forum (CLEF 2003)) of resolution of one question, where system chooses the correct solution since the V r is higher than 0.5.</p><p>• Question: A qué primer ministro abrió la Fiscalía de Milán un sumario por corrupción? ( To whom prime minister the Office of the public prosecutor of Milan opened a summary for corruption?)</p><p>• Type: person  • Value of the solution:</p><p>V r = ( 0([N P,f iscalia[P P,de:milan]])+0.45([N P,sumario[P P,por:corrupcion]])+1([N P,primer * ministro])+0([V P,abrir]) 4 − 0 * 0.1) * 0.7 + 1(Bettino − Craxi) * 0.3 = 0.52</p><formula xml:id="formula_7">• Answer: Silvio Berlusconi</formula><p>As the previous example shows, the system chooses the correct answer among several possible solutions. The correct answer has been chosen due to the value of terms.</p><p>In the sentence, with the right answer, the value of terms is higher than in other sentences. Although, the VP and NP1 are in both sentences, the NP2 is just completely in the first sentence.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Results</head><p>This section describes some tables related with the results and the evaluation of our system in CLEF-2005. The proposed system was applied to the set of 200 questions, all of them was supported by our system.</p><p>For the development of our system we used as training set the questions developed for CLEF-2003 and CLEF-2004 questions.</p><p>During this test process many faults were detected in the tools used in the lexical and morphological phases. The analysis of question 145 of CLEF-2003 shows one of these errors:</p><p>• Quién es el ministro de economía alemán? (Who's the German Minister of Finance?)</p><p>The term Alemán is not in the prepositional phrase where the term economía is, because of economía is tagged as feminine and aleman is tagged as masculine. So, when searching for SB in the corpus to find an answer for the questions, it gives wrong answers.</p><p>We submitted two runs. The first run was obtained applying the system after repairing the lexical and morphological errors that we have detected (alia051eses) while the second run (alia52eses) performed QA process without repairing theses faults. Table <ref type="table" target="#tab_1">1</ref> shows the results for each run and how theses errors lowered our system performance giving wrong answers.</p><p>Inexact answers also lower the system performance and in our system, these are due to errors in parsing process. An answer was judged inexact when the answer string contained more or less than just the correct answer, ie. the system finds this answer in the text but it does not extract the part of the information needed to return it as an answer. Our system returned 24 inexact answers (see Table <ref type="table" target="#tab_1">1</ref>). 
We may obtain a higher level of performance (45%) if we take into account that these inexact answers include the expected answer.</p><p>Finally, Table <ref type="table" target="#tab_2">2</ref> shows that the accuracy over temporal questions was 34.38%, i.e., we have obtained 11 right answers over 32. This is considered a good score because no special mechanism was developed.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Conclusion and future work</head><p>For our first participation in the QA@CLEF track, we proposed a QA system designed to search Spanish documents in response to Spanish queries. To do so we used a Spanish syntactic analyzer in order to assist in identifying the expected answers and the solution of the question.</p><p>All questions given in this track have been supported by our system. The results showed overall accuracy levels of 33%.</p><p>As previously mentioned, the used tools reduce exactitude to our system (alia52eses). These are encountering results that show the potential of the proposed approach, taking into account that the use of patterns is a less expensive recourse compared with other proposals.</p><p>Ongoing work on the system is focused on multilingual task, temporal question treatment and the incorporation of knowledge to those phases that can be useful to increase the our system performance.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: System architecture</figDesc><graphic coords="3,131.42,125.30,339.74,284.16" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>• Case: 3 ••• 4 − 0</head><label>340</label><figDesc>List of BS: -NP1: ([NP, primer*ministro]) -VP: ([VP, abrir]) -NP2:([NP, fiscalia [PP, de: milan]])([NP, sumario [PP, por: corrupcion]]) Text where is a correct solution: "[. . .] la Fiscalía de Milán abrió, hoy martes, un sumario al primer ministro, Silvio Berslusconi, por un supuesto delito de corrupción [. . .]" Value of the solution:V r = ( 1([N P,f iscalia[P P,de:milan]])+0.65([N P,sumario[P P,por:corrupcion]])+1([N P,primer * ministro])+1([V P,abrir]) * 0.1) * 0.7 + 1(Silvio − Berlusconi) * 0.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>•</head><label></label><figDesc>Question: ¿Qué presidente de Corea del Norte murió a los 80 años de edad? (What North Korea's president died at the age of 80? )</figDesc><table><row><cell>• List of SB: [NP, north * korea * president] [VP, to death] [PP, at: age [PP, of: 80]</cell></row><row><cell>• Text: [. . .] Kim Il Sung, presidente de Corea del Norte, murió ayer a los 82 años [. . .] ([. . .] Kim Il</cell></row><row><cell>Sung, president of North Korea, died yesterday at the age of 80 [. . .])</cell></row><row><cell>• List of SB of sentence: [. . .] [NP, kim * il *  sung [PP, apposition: president [PP, of: north * korea]]]</cell></row><row><cell>[VP, to death] [PP, at: age [PP, of: 80] [. . .]</cell></row><row><cell>• Answer: Kim Il Sung</cell></row><row><cell>• Type: person</cell></row><row><cell>• Case: 3</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head>Table 1 :</head><label>1</label><figDesc>General results obtained for each runs</figDesc><table><row><cell>3 = 0.93</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head>Table 2 :</head><label>2</label><figDesc>Accuracy over questions• Text where is an incorrect solution: "[. . .] primer ministro y líder socialista, Bettino Craxi, al que el pasado 21 de septiembre Paraggio abrió un sumario relacionado con el proyecto Limen por supuestos delitos de corrupción [. . .]"</figDesc><table /></figure>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Acknowledgments</head><p>This research has been partially funded by the Spanish Government under project CICyT number TIC2003-07158-C04-01 and by the Valencia Government under project number GV04B-268.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">MACO: Morphological Analyzer Corpus-Oriented</title>
		<author>
			<persName><forename type="first">S</forename><surname>Acebo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Ageno</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Climent</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Farreres</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Padró</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Placer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Rodriguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Taulé</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Turno</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ESPRIT BRA-7315 Aquilex II</title>
				<imprint>
			<date type="published" when="1994">1994</date>
			<biblScope unit="volume">31</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">TALP-QA System for Spanish at CLEF-2004</title>
		<author>
			<persName><forename type="first">A</forename><surname>Ageno</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Ferrés</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>González</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Kanaan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Rodríguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Surdeanu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Turmo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="425" to="434" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">miraQA: Initial experiments in Question Answering</title>
		<author>
			<persName><forename type="first">C</forename><surname>De Pablo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">L</forename><surname>Martínez-Fernández</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Martínez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Villena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">M</forename><surname>García-Serrano</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Goñi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">C</forename><surname>González</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="371" to="376" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">An Empirical Approach to Spanish Anaphora Resolution</title>
		<author>
			<persName><forename type="first">A</forename><surname>Ferrández</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Palomar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Moreno</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Special Issue on Anaphora Resolution In Machine Translation</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="issue">3/4</biblScope>
			<biblScope unit="page" from="191" to="216" />
			<date type="published" when="1999-12">December 1999</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Question Answering Pilot Task at CLEF</title>
		<author>
			<persName><forename type="first">J</forename><surname>Herrera</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Peñas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Verdejo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="445" to="452" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Ir-n, a passage retrieval system</title>
		<author>
			<persName><forename type="first">F</forename><surname>Llopis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">L</forename><surname>Vicedo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">COLE at CLEF 2004: Rapid prototyping of a QA system for Spanish</title>
		<author>
			<persName><forename type="first">E</forename><surname>Méndez-Díaz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Vilares-Ferro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Cabrero-Souto</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="413" to="418" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">The Use of Lexical Context in Question Answering for Spanish</title>
		<author>
			<persName><forename type="first">M</forename><surname>Pérez-Coutiño</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Solorio</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Montes Y Gómez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>López-López</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Villaseñor-Pineda</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="377" to="384" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Question Answering in Spanish. Comparative Evaluation of Multilingual Information Access Systems</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">L</forename><surname>Vicedo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Izquierdo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Llopis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Muñoz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">4th Workshop of the Cross-Language Evaluation Forum</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<meeting><address><addrLine>Trondheim, Norway</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2003">August 21-22, 2003</date>
			<biblScope unit="volume">3237</biblScope>
			<biblScope unit="page">541</biblScope>
		</imprint>
		<respStmt>
			<orgName>CLEF</orgName>
		</respStmt>
	</monogr>
	<note>Revised Selected Papers</note>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Does English Help Question Answering in Spanish?</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">L</forename><surname>Vicedo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Saiz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Izquierdo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Workshop of Cross-Language Evaluation Forum (CLEF)</title>
				<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="page" from="419" to="424" />
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
