<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">THEaiTRE: Artificial Intelligence to Write a Theatre Play</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Rudolf</forename><surname>Rosa</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ondřej</forename><surname>Dušek</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Tom</forename><surname>Kocmi</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">David</forename><surname>Mareček</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Tomáš</forename><surname>Musil</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Patrícia</forename><surname>Schmidtová</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Dominik</forename><surname>Jurko</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ondřej</forename><surname>Bojar</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics and Physics</orgName>
								<orgName type="department" key="dep2">Institute of Formal and Applied Linguistics</orgName>
								<orgName type="institution">Charles University</orgName>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">The Švanda Theatre in Smíchov</orgName>
								<address>
									<settlement>Prague</settlement>
								</address>
							</affiliation>
						</author>
						<author role="corresp">
							<persName><forename type="first">Daniel</forename><surname>Hrbek</surname></persName>
							<email>hrbek@svandovodivadlo.cz</email>
						</author>
						<author>
							<persName><forename type="first">David</forename><surname>Košťák</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Martina</forename><surname>Kinská</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Josef</forename><surname>Doležal</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Klára</forename><surname>Vosecká</surname></persName>
						</author>
						<title level="a" type="main">THEaiTRE: Artificial Intelligence to Write a Theatre Play</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">822AB0FDB5B0B7507F55437F172C3BD3</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-19T15:28+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>We present THEaiTRE, a starting research project aimed at automatic generation of theatre play scripts. This paper reviews related work and drafts an approach we intend to follow. We plan to adopt generative neural language models and hierarchical generation approaches, supported by summarization and machine translation methods, and complemented with a human-in-the-loop approach.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1">Narrative Natural Language Generation</head><p>While we are not aware of any generation systems specifically aimed at theatre play generation, research in story/narrative generation has been quite active in the past years, with</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>We introduce the THEaiTRE project, 1 which aims to produce and stage the first computer-generated theatre play. This play will be presented on the occasion of the 100th anniversary of Karel Čapek's play R.U.R. <ref type="bibr">[ Čapek, 1920]</ref>, for which the word "robot" was invented by Čapek.</p><p>The project, currently in its early stages, is at the intersection of artificial intelligence research and theatre studies. The core of our approach is to use state-of-the-art deep neural models trained and fine-tuned on theatre play data. However, our team includes both experts on natural language processing and theatre experts, and our solution will be based on research and exchange of experience from both fields.</p><p>In this paper, we first review related previous works (Section 2) and data resources available to us (Section 3). We then draft the approaches we are following and intending to follow in the project (Section 4) and present the project timeline (Section 5).</p><p>computer-aided systems allowing various degrees of automation and different abilities in learning from data <ref type="bibr" target="#b10">[Kybartas and Bidarra, 2017;</ref><ref type="bibr" target="#b12">Riedl, 2018]</ref>. Since recurrent neural networks (RNN) were applied for text generation <ref type="bibr" target="#b1">[Bahdanau et al., 2015;</ref><ref type="bibr" target="#b19">Sutskever et al., 2014]</ref>, research in story generation has mostly focused on fully data-driven, fully automated approaches. As plain RNNs were found unsuitable for producing longer, coherent texts <ref type="bibr" target="#b22">[Wiseman et al., 2017]</ref>, multiple improvements have been proposed.</p><p>The first line of work focuses on providing a higher-level semantic representation to the networks and conditioning the generation on it. <ref type="bibr" target="#b12">Martin et al. 
[2018]</ref> and <ref type="bibr" target="#b0">Ammanabrolu et al. [2019;</ref><ref type="bibr" target="#b1">2020]</ref> use an event-based representation, where an event roughly represents a clause (predicate, subject, direct and indirect object). The model generates the story at the event level and subsequently realizes the individual events to surface sentences. <ref type="bibr" target="#b21">Tu et al. [2019]</ref> take a similar approach, using frame semantics and also conditioning sentence generation on other information, such as sentiment.</p><p>Other works focus on explicit entity modelling across the generated story, e.g., <ref type="bibr" target="#b4">Clark et al. [2018]</ref>. Here, each entity has its own distributed representation (embedding), which is updated on each mention of the entity in the story.</p><p>Multiple authors attempt to increase long-term coherence by hierarchical story generation. <ref type="bibr" target="#b6">Fan et al. [2018]</ref> generate first a short prompt/tagline, then use it to condition the full story generation. <ref type="bibr" target="#b23">Yao et al. [2019]</ref> take a similar approach, using a "storyline" -a list of entities and items to be introduced in the story in the given order. Fan et al. <ref type="bibr">[2019]</ref> then combine the hierarchical generation with explicit entity modelling. Their system generates outputs using anonymized but tracked entities, which are subsequently lexicalized in the context of the story by generating referring expressions.</p><p>Several works experiment with altering the base RNN architecture: Wang and Wan [2019] use a modified Transformer architecture <ref type="bibr" target="#b21">[Vaswani et al., 2017]</ref>, which is trained as a conditional variational autoencoder. <ref type="bibr" target="#b19">Tambwekar et al. [2019]</ref> utilize reinforcement learning with automatically induced rewards to train their event-based model. <ref type="bibr" target="#b0">Ammanabrolu et al. 
[2019;</ref><ref type="bibr" target="#b1">2020]</ref> extend this work by experimenting with various sentence realization techniques, including retrieval from database and post-editing. Latest works use massive pretrained language models based on the Transformer architecture, such as GPT-2 [Radford et al., 2019], for generation. See et al. [2019] use GPT-2 directly and show that it is superior to plain RNNs. Mao et al. [2019] apply GPT-2 fine-tuned for both story generation and common-sense reasoning to improve coherence.</p><p>While research in this area has progressed considerably, most experiments have been performed on rather short and simple stories, such as the ROCStories corpus <ref type="bibr" target="#b14">[Mostafazadeh et al., 2016]</ref>. Many works focus on limited tasks, such as single-sentence continuation generation <ref type="bibr" target="#b21">[Tu et al., 2019]</ref>. The state-of-the-art results still cannot match human performance, producing repetitive and dull outputs <ref type="bibr">[See et al., 2019]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2">Dramatic Analysis</head><p>For our needs, we are mostly interested in classifications and abstractions over theatre play scripts or their parts. In the field of theatre studies, there is a vast amount of research on the structure and interpretation of theatre plays. Unfortunately, the results of such research are not made available in forms and formats that would easily allow us to use these as data and annotations in machine learning approaches.</p><p>The Thirty-Six Dramatic Situations by <ref type="bibr" target="#b15">Polti [1921]</ref> <ref type="foot" target="#foot_0">2</ref> is a classic work, in which the author presented a supposedly ultimate list of all categories of possible dramatic situations that can occur in a theatre play (e.g. "adultery" or "conflict with a god"), further subclassified into 323 situational possibilities.</p><p>Although not directly related to theatre plays, the work of <ref type="bibr">[Propp, 1968]</ref> is also essential. Propp analyzed Russian folk tales and identified 31 functions, similar to Polti's situations but somewhat more down-to-earth (e.g. "villainy" or "wedding"), as well as 7 abstract character types (e.g. "villain" or "hero") and other abstractions.</p><p>Polti's and Propp's categorizations are sometimes used in analyzing and generating narratives, although typically not in drama. The works closest to our focus are probably those of <ref type="bibr">[Gervás et al., 2016]</ref> or <ref type="bibr">Lombardo et al. 
[2018]</ref>, who devised ontologies of abstractions for annotating scripts, based on both of the mentioned works, as well as on more recent plot categorization studies <ref type="bibr" target="#b2">[Booker, 2004;</ref><ref type="bibr" target="#b20">Tobias, 2011]</ref>.</p><p>There are also works producing drama analyses in the form of networks, capturing various relations between the characters in the play <ref type="bibr" target="#b14">[Moretti, 2014;</ref><ref type="bibr" target="#b9">Horstmann, 2019;</ref><ref type="bibr" target="#b7">Fischer et al., 2019]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3">Computer-Generated Art</head><p>There already is a range of partially or fully artificially generated works of art -e.g. a short sci-fi movie with an LSTMgenerated and human-post-edited script <ref type="bibr">[Benjamin et al., 2016]</ref>, a musical based on suggestions from several automated tools <ref type="bibr" target="#b5">[Colton et al., 2016]</ref>, a human-picked collection of computer generated poems <ref type="bibr">[Materna, 2016]</ref>, or a theatre play written with the help of a next word suggestion tool <ref type="bibr" target="#b8">[Helper, 2018]</ref>. While this demonstrates the technical possibility of such an approach, the mixed reception of the outcomes shows that the employed technologies are not (yet?) on par with humans <ref type="bibr">[See et al., 2019]</ref>. We thus believe a more specialized and complex approach is needed here.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Data Resources</head><p>Theatre play scripts are not easily available for our purposes. As no reasonable corpus is available, we have to create one ourselves. The corpus will contain Czech and English theatre play scripts and synopses (plot summaries), and will be used to train and fine-tune our systems, described in the following sections. We are also collecting film and TV series scripts, which are easier to obtain in large quantities, although they are not a perfect match for our setting. Unfortunately, due to copyright reasons, we will not be able to release the full corpus.</p><p>In most cases, scripts cannot be downloaded for free, and for most scripts it seems that they are only available in print or scanned. Even electronically available scripts come in various formats and there seem to be no technical standards in this respect. For our project, we need to devise a common representation format, and automatically or semi-automatically convert and normalize the data into the format, marking character names, lines, scenic notes, scene settings, etc. <ref type="bibr" target="#b6">[Croce et al., 2019]</ref>. Also the scripts and synopses need to be paired together. At the moment, we have only collected and partially converted several hundreds of documents.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Planned Approach</head><p>As a theatre play script is a highly structured and complex piece of text, we plan to take a hierarchical approach composed of several steps to generating the full script, also employing human inputs in the process. The overall idea is to start from a brief description of the play, gradually expanding it into more detailed act and scene synopses, and finally generating the individual scene dialogues. We currently envision using generative neural models for the final step (Section 4.1), conditioned by prompts generated by hierarchical generation approaches (Section 4.2).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">Applying Neural Language Models</head><p>Large neural language models (LMs), such as GPT-2 [Radford et al., 2019; see Section 2.1], are able to generate believable texts in certain domains (e.g. news articles). This is not the case for the domain of theatre plays. The original GPT-2 must have had a number of plays (or movie scripts) in the training data, which is evident when it is presented with a suitable starting prompt. It can produce a text that follows the formal structure and has some level of content coherence. However, the basic attributes of a dramatic situation are missing: there is no plot, and the scene is not moving towards a conclusion. Other problems include having new characters appear randomly in the middle of the scene or falling into a state of repeating the same sentence forever.</p><p>Our basic workflow would be to seed an LM with a prompt which is the beginning of a dramatic situation. The LM would generate the rest of the whole dialogue. We plan to finetune the LM to theatre plays to see how far this approach can go. Then we plan to restrict the generation by enforcing that only certain predetermined characters speak, possibly in a pregenerated order. This can be achieved by stopping the generation at the end of a character's line, adding the name of the next desired character and then resuming the generation process.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>2</head><p>To make the characters more internally consistent and different from each other at the same time, we plan to devise individual LMs specialized to specific character types, based on a clustering of the characters across plays. The part of each character would then be generated by a different LM; i.e., the script would consist of several LMs "talking" to each other.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">Hierarchical Generation</head><p>We also plan to extend our experiments with hierarchical generation from large pretrained LMs. We will use an approach similar to Fan et al. <ref type="bibr">[2018]</ref> and <ref type="bibr" target="#b23">Yao et al. [2019]</ref> (see Section 2.1): starting with generating a title or a prompt for the story, then generating a textual synopsis. The generation of the play from the synopsis will follow as a novel step, not present in previous works. We are considering multiple options of what to choose as the synopsis representation: the play background/setting from play databases, more detailed synopses from fan websites, or scenic remarks extracted from texts of plays themselves. Ultimately, the choice will be made based on data availability. The setup will also include generating "play metadata", such as the main theme, list of characters, narrative type, etc.</p><p>The final step will use a similar approach as the base LM generation (see Section 4.1). We also plan on using explicit embeddings for individual characters in the play and using explicit entity tracking/coreference <ref type="bibr" target="#b4">[Clark et al., 2018;</ref><ref type="bibr" target="#b6">Fan et al., 2019]</ref>. Since the available automatic coreference tools [e.g., <ref type="bibr" target="#b3">Clark and Manning, 2016;</ref><ref type="bibr" target="#b11">Lee et al., 2017]</ref> are typically not trained for processing dialogic texts, they may require adaptation.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.3">Data Synthesis through Summarization</head><p>The hierarchical generation approach relies on data that contain information of various granularity, as described in Section 4.2. However, most of the available data contain only the title and the script of the play, missing other invaluable information. In our project, we intend to synthesize the missing data; synthetic data are frequently used in various tasks, such as machine translation <ref type="bibr">[Bojar and Tamchyna, 2011;</ref><ref type="bibr" target="#b17">Sennrich et al., 2016]</ref>.</p><p>We can generate synthetic data with the use of the classical task of text summarization; abstractive summarization in particular <ref type="bibr" target="#b17">[Rush et al., 2015]</ref>. The main idea is to take a long document and summarize it into a few sentences, then take these synthetic data and use them for training the generative models in the hierarchical approach. With various summarizing models, we can first abstract the whole script of a theatre play into a detailed synopsis, then the detailed synopsis into a short plot synopsis, and eventually the short synopsis into the play title. With these summarizing models, we can fill the gaps in our datasets, so that the hierarchical generation models can be trained on all theatre scripts available to us, even if they lack some or all higher-level summaries.</p><p>We plan to train the Transformer model <ref type="bibr" target="#b21">[Vaswani et al., 2017]</ref> for the summarization tasks. 
As we expect the amount of available training play-summary pairs to be scarce, we will pretrain our models on other summarization tasks, such as news abstract generation for which plenty of parallel data is available <ref type="bibr" target="#b18">[Straka et al., 2018]</ref>, followed by fine-tuning the pretrained models on our in-domain theatre data.</p><p>Due to the specific nature of the genre, where a lot of what is meant is not explicitly said by any of the characters, we know that the summarization may be difficult or impossible to do, and this component thus cannot be entirely relied on.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.4">Machine Translation</head><p>We plan on using machine translation (MT) for two purposes:</p><p>(1) Since we have limited amounts of training data scattered across both English and Czech, we need the generation to take advantage of data in both languages. Therefore, we plan to generate new training data by translating either Czech texts to English or vice versa. (2) We would like the same resulting generated play to be available instantly in both languages. Therefore, we plan to generate it in one of the languages and use MT to bring the result over to the other language.</p><p>For both applications, we are going to use our in-house state-of-the-art Czech-English model <ref type="bibr" target="#b16">[Popel, 2018]</ref>. However, theatre play scripts are a specific domain of data for which our MT models were not specifically trained. To tackle this problem, we will finetune [Miceli <ref type="bibr" target="#b13">Barone et al., 2017]</ref> the general MT models on theatre parallel data, possibly also applying automated heuristical pre-processing and/or postediting <ref type="bibr" target="#b17">[Rosa et al., 2012]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.5">Human in the Loop</head><p>To ensure a satisfactory result, we intend to complement the automated generation with interventions from theatre professionals, using a human-in-the-loop approach.</p><p>We currently envision using the automated system to generate texts and the human to choose parts of the output to use in the play. This could be done e.g. in an iterative interactive way, where the system generates several options for a line of the script, the human picks one of the options to add to the script, the system generates continuation options, etc.</p><p>Moreover, only the dialogues of the characters will be fully automatically generated. The subsequent realization and performance of the play will be in the hands of theatre professionals, who will analyze and interpret the script, devise stage directions, rehearse the play, design the scene, and finally perform the play for a live audience, all of which will further shape the perception of the play by the spectators.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Conclusion and Future Work</head><p>After some preliminary work, the project started in April 2020. The first automatically generated THEaiTRE play will be premiered in January 2021, at the occasion of the 100th anniversary of the premiere of the play R.U.R. <ref type="bibr">[ Čapek, 1920]</ref>. A premiere of a second play, generated from an improved version of our system, is planned for 2022.</p><p>The project can be tracked at https://theaitre.com</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head></head><label></label><figDesc>2019], for generation.See et al. [2019]  use GPT-2 directly and show that it is superior to plain RNNs. Mao et al.</figDesc><table><row><cell>ford et al.,</cell></row><row><cell>extend this work by experimenting with var-</cell></row><row><cell>ious sentence realization techniques, including retrieval from</cell></row><row><cell>database and post-editing.</cell></row><row><cell>Latest works use massive pretrained language models</cell></row><row><cell>based on the Transformer architecture, such as GPT-2 [Rad-</cell></row><row><cell>1</cell></row></table></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_0">https://en.wikipedia.org/wiki/The_Thirty-Six_Dramatic_ Situations</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>The THEaiTRE project is supported by the Technology Agency of the Czech Republic grant TL03000348, and partially supported by SVV project number 260 575.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Guided Neural Language Generation for Automated Storytelling</title>
		<author>
			<persName><surname>Ammanabrolu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Second Workshop on Storytelling</title>
				<meeting>the Second Workshop on Storytelling<address><addrLine>Florence, Italy</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computational Linguistics</publisher>
			<date type="published" when="2019-08">2019. August 2019</date>
			<biblScope unit="page" from="46" to="55" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Neural Machine Translation by Jointly Learning to Align and Translate</title>
		<author>
			<persName><surname>Ammanabrolu</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1909.03480</idno>
		<idno>arXiv: 1409.0473</idno>
		<ptr target="https://www.youtube.com/watch?v=LY7x2Ihqjmc" />
	</analytic>
	<monogr>
		<title level="m">3rd International Conference on Learning Representations (ICLR2015)</title>
				<editor>
			<persName><forename type="first">Ondřej</forename><surname>Bojar</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Aleš</forename><surname>Tamchyna</surname></persName>
		</editor>
		<meeting><address><addrLine>New York, NY, USA; San Diego, CA, USA; Edinburgh, Scotland</addrLine></address></meeting>
		<imprint>
			<publisher>ACL</publisher>
			<date type="published" when="2011">2020. February 2020. May 2015. 2016. 2016. 2011. 2011</date>
			<biblScope unit="page" from="330" to="336" />
		</imprint>
	</monogr>
	<note>Proceedings of WMT</note>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<title level="m" type="main">The seven basic plots: Why we tell stories</title>
		<author>
			<persName><forename type="first">Christopher</forename><surname>Booker</surname></persName>
		</author>
		<author>
			<persName><surname>Booker</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2004">2004. 2004</date>
			<publisher>A&amp;C Black</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Deep Reinforcement Learning for Mention-Ranking Coreference Models</title>
		<author>
			<persName><forename type="first">Karel</forename><surname>Čapek</surname></persName>
		</author>
		<author>
			<persName><forename type="middle">R U R</forename><surname>Čapek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Manning</forename><surname>Clark</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Kevin</forename><surname>Clark</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Christopher</forename><forename type="middle">D</forename><surname>Manning</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Aventinum, Ot. Štorch-Marien</title>
				<meeting><address><addrLine>Praha; Austin, Texas</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computational Linguistics</publisher>
			<date type="published" when="1920">1920. 1920. 2016. November 2016</date>
			<biblScope unit="page" from="2256" to="2262" />
		</imprint>
	</monogr>
	<note>Proceedings of EMNLP</note>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Neural Text Generation in Stories Using Entity Representations as Context</title>
		<author>
			<persName><forename type="first">Clark</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Association for Computational Linguistics</title>
				<meeting><address><addrLine>New Orleans, Louisiana</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2018-06">2018. June 2018</date>
			<biblScope unit="page" from="2250" to="2260" />
		</imprint>
	</monogr>
	<note>Proceedings of NAACL-HLT</note>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">The Beyond the Fence musical and Computer Says Show documentary</title>
		<author>
			<persName><forename type="first">Colton</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Seventh International Conference on Computational Creativity</title>
				<meeting>the Seventh International Conference on Computational Creativity</meeting>
		<imprint>
			<date type="published" when="2016">2016. 2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Automatic recognition of narrative drama units: A structured learning approach</title>
		<author>
			<persName><surname>Croce</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1805.04833</idno>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics</title>
				<meeting>the 56th Annual Meeting of the Association for Computational Linguistics<address><addrLine>New Orleans, LA, USA; Florence, Italy</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2018-06">2019. 2019. 2018. June 2018. July 2019</date>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page" from="2650" to="2660" />
		</imprint>
	</monogr>
	<note>Association for Computational Linguistics</note>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Programmable corpora. die digitale literaturwissenschaft zwischen forschung und infrastruktur am beispiel von dracor</title>
		<author>
			<persName><surname>Fischer</surname></persName>
		</author>
		<ptr target="https://github.com/dracor-org/gerdracor" />
	</analytic>
	<monogr>
		<title level="m">DHd 2019 Digital Humanities: multimedial &amp; multimodal. Konferenzabstracts</title>
		<title level="s">Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik</title>
		<meeting><address><addrLine>Frankfurt am Main</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2016">2019. March 2019. 2016. 2016</date>
			<biblScope unit="page" from="194" to="197" />
		</imprint>
	</monogr>
	<note>7th Workshop on Computational Models of Narrative (CMN 2016)</note>
</biblStruct>

<biblStruct xml:id="b8">
	<monogr>
		<author>
			<persName><surname>Helper</surname></persName>
		</author>
		<ptr target="https://www.roslynhelper.com/lifestyle-of-the-richard-and-family" />
		<title level="m">Roslyn Helper. Lifestyle of the Richard and family</title>
				<imprint>
			<date type="published" when="2018">2018. 2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">DraCor: Drama corpora project</title>
		<author>
			<persName><forename type="first">Jan</forename><surname>Horstmann</surname></persName>
		</author>
		<author>
			<persName><surname>Horstmann</surname></persName>
		</author>
		<ptr target="https://dracor.org/" />
	</analytic>
	<monogr>
		<title level="m">forTEXT. Literatur digital erforschen</title>
				<imprint>
			<date type="published" when="2019">2019. 2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">A Survey on Story Generation Techniques for Authoring Computational Narratives</title>
		<author>
			<persName><forename type="first">Bidarra</forename><surname>Kybartas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Kybartas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Bidarra</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Computational Intelligence and AI in Games</title>
		<imprint>
			<biblScope unit="volume">9</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="239" to="253" />
			<date type="published" when="2017-09">2017. September 2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Improving Neural Story Generation by Targeted Common Sense Grounding</title>
		<author>
			<persName><surname>Lee</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)</title>
				<meeting>the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)<address><addrLine>Copenhagen, Denmark; Hong Kong, China</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2017-09">2017. September 2017. 2018. 2018. 2019. November 2019</date>
			<biblScope unit="page" from="5987" to="5992" />
		</imprint>
	</monogr>
	<note>Association for Computational Linguistics</note>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Event Representations for Automated Story Generation with Deep Neural Nets</title>
		<author>
			<persName><forename type="first">Martin</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">AAAI</title>
				<meeting><address><addrLine>New Orleans, LA, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Materna</publisher>
			<date type="published" when="2016">2018. 2018. 2016. 2016</date>
		</imprint>
	</monogr>
	<note>Jiří Materna. Poezie umělého světa. Backstage Books</note>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Regularization techniques for fine-tuning in neural machine translation</title>
		<author>
			<persName><forename type="first">Miceli</forename><surname>Barone</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing</title>
				<meeting>the 2017 Conference on Empirical Methods in Natural Language Processing<address><addrLine>Copenhagen, Denmark</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2017-09">2017. September 2017</date>
			<biblScope unit="page" from="1489" to="1494" />
		</imprint>
	</monogr>
	<note>Association for Computational Linguistics</note>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Story Cloze Evaluator: Vector Space Representation Evaluation by Predicting What Happens Next</title>
		<author>
			<persName><forename type="first">Franco</forename><surname>Moretti</surname></persName>
		</author>
		<author>
			<persName><surname>Moretti</surname></persName>
		</author>
		<author>
			<persName><surname>Mostafazadeh</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP</title>
				<meeting>the 1st Workshop on Evaluating Vector-Space Representations for NLP<address><addrLine>Berlin, Germany</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2014">2014. 2014. 2016. August 2016</date>
			<biblScope unit="page" from="24" to="29" />
		</imprint>
		<respStmt>
			<orgName>Stanford Literary Lab</orgName>
		</respStmt>
	</monogr>
	<note>Association for Computational Linguistics</note>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">The thirty-six dramatic situations</title>
		<author>
			<persName><surname>Polti</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Georges Polti</title>
				<imprint>
			<publisher>JK Reeve</publisher>
			<date type="published" when="1921">1921. 1921</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Cuni transformer neural mt system for wmt18</title>
		<author>
			<persName><forename type="first">Martin</forename><surname>Popel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Alec</forename><surname>Popel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jeffrey</forename><surname>Radford</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Rewon</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><surname>Child</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Third Conference on Machine Translation</title>
				<editor>
			<persName><forename type="first">David</forename><surname>Luan</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Dario</forename><surname>Amodei</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Ilya</forename><surname>Sutskever</surname></persName>
		</editor>
		<meeting>the Third Conference on Machine Translation<address><addrLine>Belgium, Brussels; OpenAI</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1928">2018. October 2018. 1968. 1928. 1968. 2019. February 2019</date>
			<biblScope unit="page" from="486" to="491" />
		</imprint>
	</monogr>
	<note type="report_type">Technical report</note>
	<note>Language Models are Unsupervised Multitask Learners</note>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Computational Narrative Intelligence: Past, Present, and Future. Medium</title>
		<author>
			<persName><forename type="first">Mark</forename><surname>Riedl</surname></persName>
		</author>
		<author>
			<persName><surname>Riedl</surname></persName>
		</author>
		<author>
			<persName><surname>Rosa</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1909.10705</idno>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics</title>
		<title level="s">Long Papers</title>
		<meeting>the 54th Annual Meeting of the Association for Computational Linguistics<address><addrLine>Lisbon, Portugal; Hong Kong; Berlin, Germany</addrLine></address></meeting>
		<imprint>
			<publisher>Akhila Yerukola</publisher>
			<date type="published" when="2012">2018. February 2018. 2012. 2012. 2015. September 2015. 2019. November 2019. August 2016</date>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page" from="86" to="96" />
		</imprint>
	</monogr>
	<note>Association for Computational Linguistics</note>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">SumeCzech: Large Czech News-Based Summarization Dataset</title>
		<author>
			<persName><surname>Straka</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)</title>
				<meeting>the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)<address><addrLine>Miyazaki, Japan</addrLine></address></meeting>
		<imprint>
			<publisher>ELRA</publisher>
			<date type="published" when="2018-05-07">2018. May 7-12, 2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Sequence to sequence learning with neural networks</title>
		<author>
			<persName><surname>Sutskever</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1409.3215</idno>
		<idno>arXiv: 1809.10736</idno>
	</analytic>
	<monogr>
		<title level="m">Controllable Neural Story Plot Generation via Reinforcement Learning</title>
				<meeting><address><addrLine>Macau</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2014">2014. 2014. 2019. August 2019</date>
			<biblScope unit="page" from="3104" to="3112" />
		</imprint>
	</monogr>
	<note>International Joint Conference on Artificial Intelligence</note>
</biblStruct>

<biblStruct xml:id="b20">
	<monogr>
		<title level="m" type="main">MASTER Plots: and how to build them</title>
		<author>
			<persName><forename type="first">Ronald</forename><forename type="middle">B</forename><surname>Tobias</surname></persName>
		</author>
		<author>
			<persName><surname>Tobias</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2011">2011. 2011</date>
			<publisher>Penguin</publisher>
			<biblScope unit="volume">20</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">T-CVAE: Transformer-Based Conditioned Variational Autoencoder for Story Completion</title>
		<author>
			<persName><surname>Tu</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1909.13434</idno>
		<idno>arXiv: 1706.03762</idno>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence</title>
				<meeting>the Twenty-Eighth International Joint Conference on Artificial Intelligence<address><addrLine>Hong Kong; Long Beach, CA, USA; Macao, China</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2017-08">2019. November 2019. December 2017. August 2019</date>
			<biblScope unit="page" from="5233" to="5239" />
		</imprint>
	</monogr>
	<note>International Joint Conferences on Artificial Intelligence Organization</note>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Challenges in Data-to-Document Generation</title>
		<author>
			<persName><surname>Wiseman</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1707.08052</idno>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing</title>
				<meeting>the 2017 Conference on Empirical Methods in Natural Language Processing<address><addrLine>Copenhagen, Denmark</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2017-09">2017. September 2017</date>
			<biblScope unit="page" from="2243" to="2253" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<monogr>
		<title level="m" type="main">Plan-And-Write: Towards Better Automatic Storytelling</title>
		<author>
			<persName><surname>Yao</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1811.05701</idno>
		<imprint>
			<date type="published" when="2019-01">2019. January 2019</date>
			<publisher>AAAI</publisher>
			<pubPlace>Honolulu, HI, USA</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
