<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">VisionWaves: Aligning Business Process Management and Performance Management to Achieve Business (Process) Excellence</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Marc</forename><surname>Kerremans</surname></persName>
							<email>m.kerremans@visionwaves.com</email>
						</author>
						<author>
							<persName><forename type="first">Michael</forename><surname>Westergaard</surname></persName>
							<email>m.westergaard@tue.nl</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Mathematics and Computer Science</orgName>
								<orgName type="institution">Eindhoven University of Technology</orgName>
								<address>
									<country key="NL">The Netherlands</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">National Research University Higher School of Economics</orgName>
								<address>
									<postCode>101000</postCode>
									<settlement>Moscow</settlement>
									<country key="RU">Russia</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="institution" key="instit1">Hasso Plattner Institute</orgName>
								<orgName type="institution" key="instit2">University of Potsdam</orgName>
							</affiliation>
							<affiliation key="aff5">
								<orgName type="institution">Eindhoven University of Technology</orgName>
							</affiliation>
							<affiliation key="aff7">
								<orgName type="institution">Instituto Superior Técnico, UL</orgName>
							</affiliation>
							<affiliation key="aff8">
								<orgName type="department">ESW - INESC-ID</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Tijs</forename><surname>Slaats</surname></persName>
							<email>tslaats@itu.dk</email>
							<affiliation key="aff2">
								<orgName type="institution">IT University of Copenhagen</orgName>
								<address>
									<addrLine>Rued Langgaardsvej 7</addrLine>
									<postCode>2300</postCode>
									<settlement>Copenhagen</settlement>
									<country key="DK">Denmark</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="institution">Exformatics A/S</orgName>
								<address>
									<addrLine>Lautrupsgade 13</addrLine>
									<postCode>2100</postCode>
									<settlement>Copenhagen</settlement>
									<country key="DK">Denmark</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Andreas</forename><surname>Meyer</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Mathematics and Computer Science</orgName>
								<orgName type="institution">Eindhoven University of Technology</orgName>
								<address>
									<country key="NL">The Netherlands</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="institution" key="instit1">Hasso Plattner Institute</orgName>
								<orgName type="institution" key="instit2">University of Potsdam</orgName>
							</affiliation>
							<affiliation key="aff7">
								<orgName type="institution">Instituto Superior Técnico, UL</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Luise</forename><surname>Pufahl</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Mathematics and Computer Science</orgName>
								<orgName type="institution">Eindhoven University of Technology</orgName>
								<address>
									<country key="NL">The Netherlands</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="institution" key="instit1">Hasso Plattner Institute</orgName>
								<orgName type="institution" key="instit2">University of Potsdam</orgName>
							</affiliation>
							<affiliation key="aff7">
								<orgName type="institution">Instituto Superior Técnico, UL</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Dirk</forename><surname>Fahland</surname></persName>
							<email>d.fahland@tue.nl</email>
							<affiliation key="aff1">
								<orgName type="institution">National Research University Higher School of Economics</orgName>
								<address>
									<postCode>101000</postCode>
									<settlement>Moscow</settlement>
									<country key="RU">Russia</country>
								</address>
							</affiliation>
							<affiliation key="aff5">
								<orgName type="institution">Eindhoven University of Technology</orgName>
							</affiliation>
							<affiliation key="aff8">
								<orgName type="department">ESW - INESC-ID</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mathias</forename><surname>Weske</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Mathematics and Computer Science</orgName>
								<orgName type="institution">Eindhoven University of Technology</orgName>
								<address>
									<country key="NL">The Netherlands</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="institution" key="instit1">Hasso Plattner Institute</orgName>
								<orgName type="institution" key="instit2">University of Potsdam</orgName>
							</affiliation>
							<affiliation key="aff7">
								<orgName type="institution">Instituto Superior Técnico, UL</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Andreas</forename><surname>Meyer</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Mathias</forename><surname>Weske</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Hpi</forename><surname>Uni-Potsdam</surname></persName>
						</author>
						<author>
							<persName><surname>De</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Christian</forename><surname>Ress</surname></persName>
							<email>christian.ress@student.hpi.uni-potsdam.de</email>
						</author>
						<author>
							<persName><forename type="first">Matthias</forename><surname>Kunze</surname></persName>
							<email>matthias.kunze@hpi.uni-potsdam.de</email>
						</author>
						<author>
							<persName><forename type="first">David</forename><surname>Martinho</surname></persName>
							<email>davidmartinho@ist.utl.pt</email>
						</author>
						<author>
							<persName><forename type="first">António</forename><surname>Rito</surname></persName>
						</author>
						<author>
							<affiliation key="aff6">
								<orgName type="institution" key="instit1">Hasso Plattner Institute</orgName>
								<orgName type="institution" key="instit2">University of Potsdam</orgName>
							</affiliation>
						</author>
						<title level="a" type="main">VisionWaves: Aligning Business Process Management and Performance Management to Achieve Business (Process) Excellence</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">4FBB797D874184E77A11CE550AEF560D</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-25T01:59+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Process Modeling</term>
					<term>Data Modeling</term>
					<term>Process Enactment</term>
					<term>BPMN</term>
					<term>SQL Ad-hoc Workflow</term>
					<term>Operational Support</term>
					<term>Recommender Systems</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Today's economic climate means businesses need to be as effective and efficient as possible, and to make the smartest possible decisions. The way to achieve this is by integrating Performance Management (PM) and Business Process Management (BPM) -a combination that is a prerequisite of Intelligent Business Operations. BPM on its own is not enough, omitting the context of processes. By adding PM you can achieve closed loop performance management, where metrics are compared with business objectives and the results fed back to improve processes and decisions. VisionWaves brings a business model driven approach to the table coping with these requirements and allowing Visual and Connected Management. Client experience confirms that PM and BPM are far more powerful when integrated together than individually.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Gaining competitive advantage through better processes and decisions and the limits of BPM</head><p>Given the economic and institutional crises in our globalized economy, the way to compete is to run your business with maximum efficiency and effectiveness, and to make the smartest possible decisions. One of the approaches is to focus on "the integrated business processes and capabilities that together serve customers in ways that are differentiated from competitors and that create the organization's formula for business success" [1]. Does BPM on its own deliver this right kind of efficiency? Does the balance between efficiency and agility meet business needs? Is the level of intimacy appropriate for every customer? Let's look at some examples:</p><p>-A customer delivery process can be very efficient (for example, following a Lean program) -in fact, it may not contain any idle time at all. But efficiency is not always matched with effectiveness. For example, it may be that because of marketing campaigns in some regions, there are configuration problems and stock breakdowns. -By definition, agility can necessitate some inefficiencies. You need more than minimal stock levels to keep a manufacturing conveyor belt running; similarly, economic growth arguably requires some level of frictional unemployment.</p><p>-A call center representative may be confronted with a demanding customer, and be forced to decide whether to prioritize overall efficiency or service to this one customer. Does the agent mark the customer as a lead, get off the phone, and move on to the next customer, or continue the conversation with this customer and let others wait? An optimized process won't help the agent decide, unless the process also provides information about the individual customer's value to the business.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Combining BPM and PM</head><p>To yield the desired competitive advantage, structured interoperability is required across the organization's entire BPM, PM and application environments. This interoperability must be business-driven, which means that the initiative needs to start from a broader context than the processes themselves: that of the business architecture, business model, or value chain. The alignment of BPM and PM can be seen from two main perspectives:</p><p>-Injecting PM into business processes to improve decision-making -i.e. taking an inside-out view of the decision -Closed loop performance management -i.e. taking an outside-in view of the decision</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1">Injecting PM into business processes to improve decision-making</head><p>Business rules are often used to inject PM into BPM and hence provide decision support. This is a valid approach that makes processes more flexible, offers additional analytic capabilities, and hides much of the complexity that is typical of PM. However, rules engines alone are not enough. They cannot cope with the typical scenario of proliferating business models, products, services, channels, customer segments, and value expectations from different stakeholders. Nor do they provide the vital ability to understand changing environments and respond to that understanding, especially when we look at changing sets of goals.</p><p>By combining PM with the rule-based approach associated with BPM, you can monitor the decision-making process from an analytical perspective and adapt readily to changing goals. Errors in business rules will be detected sooner, and rules can be changed as necessary, either automatically or manually.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2">Closed loop performance management</head><p>Most BPM initiatives capture metrics to assess the efficiency of a process. A few initiatives have shown, though, that more can be done by setting process metrics in the broader context of a value chain or a business model. It then becomes possible to understand the impact of a given process on the overall performance of the business: either on its overall strategic objectives or a specific business campaign. That understanding can lead to better decision-making at an organizational level.</p><p>The results of performance monitoring become even more valuable if they are used to adjust business processes and objectives. This closed loop performance management often involves human intervention to improve the way decisions are made -for example, the call center agent might be instructed to look at a metric of the customer's value to the company before deciding how long to spend on the call.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">The VisionWaves proposition</head><p>VisionWaves brings a business model driven approach to the table coping with these requirements and allowing visual and connected management.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Core to the offering is the VisionWaves business model methodology that delivers a visual representation of the coherence and dependencies between the constituents of the business through depicting an easy to understand business model or value creation model. This visual insight that is missing in most alternatives is one of the key differentiators of VisionWaves within the enterprise performance management market. Furthermore this business model is dynamic meaning that it is context aware and is capable to continuously interact with its environment and its users.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">Connected Management</head><p>VisionWaves delivers full visibility of value creation, performance, processes and risks in integrated, strategically aligned and actionable management cockpits that are role based and that are generated and maintained by the intelligent framework itself. Based on the same underlying data everybody will get a role based portal that reflects actual, executable, connected, and integrated data.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3">Model Driven Application Framework</head><p>VisionWaves starts with the representation of the 'business model' including customer value, the different distribution channels through which this value is delivered, products, services, processes, organization, roles, suppliers, contracts and risks. This meta-model is stored into an object repository and therefore delivers a model-driven object model.</p><p>Next step is the connection of 'meta' performance indicators to the elements of the meta-model that are also stored in the same object repository.</p><p>Furthermore to feed real data into the object repository a meta-data model is configured as well as the (meta) description of how these data are loaded into the same object repository.</p><p>Finally even the presentation layer (dashboards, cockpits) is created and maintained by the framework.</p><p>Perhaps the biggest contribution of this model driven application framework is the way it handles changes. When there is a change in the external environment or internal context management objectives, controls and of course reporting has to be realigned. In this model driven approach this can be done by reconfiguring the models or any of the model components in real-time, followed by the immediate adaptation of all related cockpits, dashboards, and reports.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Client case study: Bank achieves Operational Intelligence by combining BPM and PM</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">Situation after BPM-only</head><p>Previous BPM initiatives resulted in leaner operations and well-documented processes, but essential business elements were still missing. For example:</p><p>-The impact of the change on business results or business value was not clear -There was a lack of information to support senior management decisions, and the impact of those decisions on operational execution was hard to establish -There was not enough information about current processes for the COO to know whether a proposed action, such as a new market campaign, was viable in a particular region -Performance at each level of the management hierarchy (COO, value chain owner, process owner, team owner) was measured, but it was not possible to see how one level impacted another</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">Results after implementing VisionWaves</head><p>-All management levels, from COO to operational team leaders, now have performance information about processes, customers, finance and capabilities to support their decisions -The impact of performance at one level on another level can be seen, and there is a daily "performance dialogue" between all hierarchical levels -This makes it possible to work together to achieve corporate objectives -A range of information to support business campaigns is now available -it is easy to assess whether they are viable, and then measure their impact -Through appropriate use of performance feedback, the bank has closed the loop between initiating actions, monitoring performance and taking new actions  </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Conclusion</head><p>Combining BPM and PM allows management and staff to make better and timelier decisions and the organization becomes more efficient and effective. This will help achieving business (process) excellence and is a crucial step for any enterprise with its sights set on Operational Intelligence or Intelligent Business Operations.</p><p>and less matured approach which has so far not found widespread application in industry yet, however the two declarative languages Declare <ref type="bibr">[6]</ref> and DCR Graphs <ref type="bibr">[2,</ref><ref type="bibr">3]</ref> have been studied extensively in academia over the last decade. Declarative languages do not explicitly specify flow of control, but instead specifies constraints between actions; examples of such constraints are init(A), meaning that any execution has to start by executing A, and response(A, B), meaning that after executing A, B has to be executed at some point. Other constraints deal with choices and variations of the response constraint.</p><p>Hybrid modeling. Recently interest has emerged in hybrid approaches, where some aspects of a process are specified directly using imperative constructs and other aspects declaratively. This is useful if part of the process is well-structured and part is more free, or for going from an abstract, little-understood process, often modeled more naturally using declarative constraints, to a more concrete implementation which by nature is often more imperative. One such hybrid approach is implemented in CPN Tools 4 <ref type="bibr">[5,</ref><ref type="bibr">7]</ref>. This approach combines the places and transitions of colored Petri nets with the constraints of the Declare and DCR Graphs languages. Fig. <ref type="figure" target="#fig_0">1</ref> shows an example of a mixed declarative and imperative model. 
In the upper half of the screen we describe the registration of a patient using an electronic patient record, which is basically form-filling and wellsuited for an imperative approach. In the bottom half we describe the treatment of the patient which is strongly knowledge-based, therefore more flexible and hence modeled using a Declarative approach. While these two aspects could have been modelled as separate processes (one imperative and the other declarative), using the hybrid approach allows us to join the two and show how they interact. Initially, only Receive Patient is required due to the declarative constraint init.</p><p>After executing Disinfect Wound, Stitch Wound has to be executed because of a response between them. Registration and treatment can happen in parallel, but prescription of antibiotics is dependent on the patient data.</p><p>The model can be extended with time information and exercises to obtain simulation-based performance information. It is also possible to obtain a simulation log from CPN Tools, which can be imported directly into ProM 6.3 for analysis using a known process. CPN Tools also offers state-space analysis for ensuring the absence of errors such as dead-locks in the process. For more information about hybrid modeling, we refer the interested reader to <ref type="bibr">[7]</ref>. Domain-specific visualization. While colored Petri net models are graphical, they are also complex to understand for non-experts. Previously, CPN Tools supported visualizations of such models by means of an external tool, but with version 4 such visualizations are internalized, making it possible to show model and visualization side-by-side without requiring external tools. In Fig. <ref type="figure" target="#fig_2">2</ref>, we see two simple visualizations of the model from Fig. <ref type="figure" target="#fig_0">1</ref>. The sequence diagram (left) shows a single patient interaction and is updated when simulation is conducted. 
The visualization is driven purely by the model, and as CPN Tools allows users full control over the simulation, can be used to demonstrate complex scenarios in a simple way. The bar chart (Fig. <ref type="figure" target="#fig_2">2</ref> (right)) shows aggregated statistics over multiple simulations.  from the flow of control, which makes models hard to understand and analyze. Workflow Nets solved this problem for standard Petri nets, but some of the restrictions are too severe for efficient use of the higher-level formalism. Colored Workflow Nets <ref type="bibr">[1]</ref> generalize Workflow Nets to colored Petri nets, but impose some restrictions that make models unnatural. Instead, CPN Tools implements Process-partitioned colored Petri nets (PP-CPNs) <ref type="bibr">[4]</ref>, which allow more flexibility and more natural models. PP-CPNs explicitly separate the flow of control and data, separating places into process places, local and shared places (for data), resource places, and communication (buffer) places.</p><p>PP-CPNs allow multiple instances of multiple process types to communicate, and hence supports an artifact-centric modeling style. Of course, classical Workflow Nets are recognized as PP-CPNs as one would expect. An example PP-CPN model of a simple producer/consumer system can be seen in Fig. <ref type="figure" target="#fig_5">3</ref> (top). Here, we have two kinds of processes communicating over a buffer place; producers produce items (integers), store them locally, and transmit them. They use a mutex (a single resource) to prevent race conditions. Initially there are two producers. Consumers receive data from producers, store it locally and dispatch depending on the data.</p><p>An advantage of PP-CPNs is that it is possible to generate them automatically from code and to generate running Java code from such models; an example of code generated from the model in Fig. <ref type="figure" target="#fig_5">3</ref> (top) is shown in Fig. 
<ref type="figure" target="#fig_5">3</ref> (bottom).</p><p>Maturity, availability, screencast. CPN Tools is a very mature tool and has been in active use for over 10 years. It enjoyed approximately 5500 downloads in the period May 1, 2012-May 1, 2013. It is used in teaching in several universities, used by companies, and a large number of case studies in several fields are available from http://cs.au.dk/cpnets/industrial-use/ and on our own homepage we showcase models from industrial case studies at http://cpntools.org/documentation/examples/. We are currently conducting case studies using the new declarative constraints, but these are on-going and not yet ready for publication. The implementation of the Declare language is an optimized version of the Declare tool <ref type="bibr">[6]</ref>.</p><p>CPN Tools is open source and available for free for everybody at http://cpntools.org/. On this page, we also have a comprehensive getting started guide including screencasts for beginners. In the future, we plan to extend CPN Tools with timed and process-aware versions of Declare.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Today, organizations use process-oriented systems to automate the enactment of their business processes. Therefore, processes are often captured in process models focusing on the activities being performed. These models are executed by process engines as, for instance, YAWL <ref type="bibr">[1]</ref>, Activiti <ref type="bibr">[2]</ref>, jBPM <ref type="bibr">[6]</ref>, Bonita <ref type="bibr">[3]</ref>, AristaFlow <ref type="bibr">[8]</ref>, and Adept2 <ref type="bibr" target="#b18">[13]</ref>. Generally, a process engine has access to a process model repository as shown in Fig. <ref type="figure" target="#fig_0">1</ref>. As soon as a start event of a particular process occurs, the engine creates a new instance of this process and enacts the control flow as specified by the process model. Thereby, the process engine is able to allocate specified user tasks to process participants via a graphical user interface or to invoke an application for execution of service tasks.</p><p>For the enactment of tasks, data plays an important role, because data specifies pre- and postconditions of tasks. A precondition requires the availability of certain data in a specified state while the postcondition demands certain manipulation of data. In modern activity-oriented process engines as mentioned above, these and further complex data dependencies (e.g., creating and updating multiplicity relations between data objects) have to be implemented manually through a process engineer by specifying the respective data access statements (see shaded elements in Fig. <ref type="figure" target="#fig_0">1</ref> left); this is an error-prone and time-consuming work.</p><p>In this paper, we explain an approach to model data dependencies in the process model itself and automatically derive data access statements from the process model as shown in Fig. <ref type="figure" target="#fig_0">1</ref> right. 
Process data utilized during activity execution is out of scope in this paper. We demonstrate the feasibility of this approach using the industry standard  for process modeling, the Business Process Model and Notation (BPMN) <ref type="bibr" target="#b17">[12]</ref>, and the Activiti process engine <ref type="bibr">[2]</ref>. Next, Section 2 shows how to model complex data dependencies in BPMN; Section 3 shows how three simple conservative extensions of the industrial process engine Activiti suffice to enact complex data dependencies from a BPMN model. We conclude in Section 4.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Modeling Complex Data Dependencies in BPMN</head><p>BPMN provides the concept of data objects that are associated to tasks <ref type="bibr" target="#b17">[12]</ref>. Roughly, a task is only enabled when its associated input data objects are in a particular state. Associated output data objects have to be in a specified state when the task completes. However, for enactment more information is required. Figure <ref type="figure" target="#fig_2">2</ref> shows a standard BPMN model of a simplified build-to-order process of a computer manufacturer (ignoring annotations set in italics). In this process, an Order that was received from a customer is first Checked and either rejected or confirmed. If it is confirmed, task Create component list creates several Components to be ordered; based on these components the order is processed in a subprocess and, when completed, the Order is sent to the customer.</p><p>In BPMN, each data object has a name and a set of attributes of which one describes the state of a data object. Data flow edges express pre-and postconditions to the different tasks, e.g., Check order is only enabled if object Order exists in state received in the current process instance. However, when handling multiple orders in different instances in parallel, the process model does not express which order is correlated to which process instance. Likewise, BPMN cannot describe create, read, update, and delete operations on one or more objects of the same kind, possibly in 1:n or m:n relationship to other data objects. For instance, one cannot express that task Create component list of Fig. <ref type="figure" target="#fig_2">2</ref> creates several new Component objects and associates them to the Order handled in the process. 
Such data dependencies would have to be implemented manually.</p><p>In <ref type="bibr">[9]</ref>, we have shown that a few simple additional annotations to BPMN data objects suffice to describe such complex data dependencies with operational semantics directly in BPMN. First, borrowing an idea from business artifacts <ref type="bibr">[5]</ref>, we propose that for each process instance (and each instance of a subprocess) exists exactly one data object instance driving and orchestrating the execution of the process instance. All other data objects used in the instance depend on that case object. The case object of Fig. <ref type="figure" target="#fig_2">2</ref> is an Order as shown by the annotation. Dependencies between data objects are expressed via primary and foreign key attributes in analogy to databases <ref type="bibr" target="#b19">[14]</ref>. Each data object has a primary key attribute that uniquely distinguishes different instances of this object, e.g., Order has the primary key attribute o id and Component has cp id. Foreign key attributes link object instances, e.g., attribute o id links Components to Orders. The primary key of the case object implicitly links to the instance identifier of the (sub-)process. Read and update of data objects are already provided through BPMN's data flow edges. We express create or delete through respective annotations in the BPMN data object, e.g., Create components list creates several new Components (annotation [new] and multi instance characteristic |||) and relates them to the current Order.</p><p>Most importantly, these annotations have operational semantics. In <ref type="bibr">[9]</ref>, it is shown how to derive SQL queries from annotated BPMN data objects that realize the specified data operations. For example, for object Order in state rejected written by activity Check order in Fig. 
<ref type="figure" target="#fig_2">2</ref>, the corresponding SQL query is derived: UPDATE Order SET state = 'rejected' WHERE o id = $ID with $ID representing the identifier of the (sub-)process the activity belongs to. See <ref type="bibr">[9]</ref> for full details.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Tool Architecture and Implementation</head><p>We implemented the approach of Section 2 to show its feasibility. In the spirit of adding only a few data annotations to BPMN, we made an existing BPMN process engine data-aware by only a few additions to its control structures. As basis, we chose Activiti <ref type="bibr">[2]</ref>, a Java-based, lightweight, and open source process engine specifically tailored for a subset of BPMN. Activiti enacts process models given in the BPMN XML format. Activiti supports standard BPMN control flow constructs. Data dependencies are not enacted from the process model, but are specified separately. We adapted the Camunda <ref type="bibr">[4]</ref> modeler to allow the creation of BPMN models with our proposed concepts; we extended the Activiti engine to enact process models with these concepts.</p><p>First, we extended the XML specification by utilizing extension elements, which the BPMN specification explicitly supports to add new attributes and properties to existing constructs. We added the case object as additional property to the (sub-)process construct. The data object was extended with additional properties for primary key (exactly one), foreign keys (arbitrary number), and the data access type as attribute. The BPMN parser of Activiti was extended to read BPMN data objects with the new attributes and properties, and data associations. The actual execution engine was extended at two points: before invoking the execution of an activity to check the preconditions of an activity and before completing an activity to realize the postconditions, both with respect to data objects. At either point, the engine checks for patterns of data input and output objects and categorizes them. For instance, in Fig. <ref type="figure" target="#fig_2">2</ref>, Order is input and output to Check order in different states. The engine classifies this as a "conditional update of case object Order". 
The data operations at task Create component list would be classified as "conditional creation of multiple data objects that depend on the case object (1:n relationship)". Classification proceeds from most specific to most general patterns.</p><p>When invoking an activity, for each matching precondition pattern a corresponding SQL select query is generated to read whether the required data objects are available. The query assumes that for each data object of the process model exists a table holding all instances of this object and their attributes. If there is an object instance in the right state, the SQL query returns the corresponding entry and is empty otherwise. The engine repeatedly queries the database until an entry is returned (i.e., the task is enabled), as shown in Fig. <ref type="figure" target="#fig_5">3</ref>. Then the activity is executed. Upon completion, an SQL insert, update, or delete query is generated for each matching postcondition pattern, and executed on the database.</p><p>Altogether, we had to extend Activiti at merely 4 points to realize our concept, as illustrated in Fig. <ref type="figure" target="#fig_13">3:</ref> (1) at the XML, (2) at the parser and internal representation, (3) when checking for enabling of activities, and (4) when completing an activity. The extensions required just over 1000 lines of code with around 600 lines being concerned with classifying data access patterns, generating, and executing SQL queries. The extended engine, a graphical modeling tool, example process models, a screencast, and a complete setup in a virtual machine are available together with the source code at http://bpt.hpi.uni-potsdam.de/Public/BPMNData.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Conclusion</head><p>In this paper, we presented an approach to automatically enact complex data dependencies from activity-centric process models. The key concepts required are data objects associated to tasks; a few annotations allow expressing relations between data objects and the particular data operation. Our modeling tool helps to easily specify the required annotations in a graphical user interface. From these annotations, SQL queries can be automatically generated and executed from a process engine, covering all fundamental data access operations: create, read, update, and delete of single data objects and of related data objects in 1:n and m:n relationship <ref type="bibr">[9]</ref>. We have shown on the process engine Activiti that minimal extensions to existing execution engines suffice to implement this concept.</p><p>Compared to other techniques and engines for enacting data dependencies from models, our approach is less intrusive. The object-centric modeling paradigm <ref type="bibr">[5,</ref><ref type="bibr">11]</ref> requires substantial changes to the infrastructure as completely new engines have to be used. Process engines for this paradigm exist, e.g., PhilharmonicFlows <ref type="bibr">[7]</ref> and Corepro <ref type="bibr">[10]</ref>, but they are incompatible with activity-centric approaches as supported by BPMN. In this respect, our work fills a critical gap in allowing owners of activity-centric processes to adopt automated enactment of data dependencies without changing paradigms and infrastructure.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction &amp; Background</head><p>Business process models are a central cornerstone of process-oriented organizations as they explicitly capture the knowledge to carry out the operations of a business and are reused for documentation, analysis, automation, and certification, among others. Modern companies maintain thousands of process models for reference and reuse <ref type="bibr">[2]</ref>, which requires effective capabilities to search among them. Researchers have proposed an abundance of techniques that focus on text, structure, or behavior of process models and allow for similarity search, i.e., obtaining approximate matches to a complete model, and querying, i.e., searching precisely by few yet relevant aspects of a model <ref type="bibr">[2]</ref>.</p><p>The majority of these process model search techniques, however, has been presented only theoretically. Evaluations of their quality and performance have been conducted under lab conditions, i.e., with a minimalistic implementation that mainly addresses the query processing, i.e., comparing candidate models with the query and deciding a match. This is, arguably, due to the effort of providing a complete process model search infrastructure that includes a user interface to formulate a query, a process model repository to store, manage, and retrieve models from, and the visual presentation of search results as a response to the query. This functionality is shared among all process model search techniques.</p><p>To this end, we have developed a prototypical process model search platform that assumes these tasks and allows for the integration of dedicated search techniques in the form of a search engine plugin architecture. This includes a set of well-defined APIs that integrate a search engine with our platform. 
Moreover, the platform provides a framework to evaluate a search engine with regards to the quality of a search technique, i.e., the relevance of the provided results, and, the performance of its implementation. The platform aims to reduce the time to implement and evaluate a particular search technique, and enables the comparison of various techniques, as they can now be deployed in the same runtime environment.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Search with your Search Technique</head><p>The central concept of our search platform is to enable developers to deploy a dedicated search engine to the platform and use it to search for process models in a straight-forward manner. Hence, one of the key features of our search platform is a presentation layer, which lets users specify search queries using BPMN and view ranked search results in a similar visual representation, depicted in Fig. <ref type="figure" target="#fig_0">1</ref>. The presentation layer includes a simplistic, web-based process model editor, that allows formulating queries as regular process models, as this is typical for similarity search and has been proposed for querying <ref type="bibr">[6]</ref>, too. The editor itself is highly extendable, which allows the formulation of queries in languages devised for search, e.g., BPMN-Q <ref type="bibr">[1]</ref>. The input query is provided to the dedicated search engine that parses it and matches it against stored process models. To this end, BPMN queries are transformed to Petri nets.</p><p>The extensible architecture of the search platform makes it possible to integrate a dedicated search engine, provided it is implemented in Java, by providing common interfaces through which the search platform can communicate with the search engine. Currently, we require that search algorithms accept queries in form of Petri nets, defined using the jBPT Java library<ref type="foot" target="#foot_0">1</ref> , and return search results in a similar format. We resorted to Petri nets, as they provide a common formalism for a variety of business process modeling languages <ref type="bibr">[7]</ref>.</p><p>Our aim is to provide researchers with an opportunity to experiment with their algorithms faster and easier, and explore the search results in an environment that is similar to one that end users expect. 
We do this by providing an API through which search algorithms can expose parameters that can be changed during runtime, and make these parameters accessible in the search interface. This way, parameters can be configured without modifying source code or static configurations, and without recompilation. The results of the change are visible immediately.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Evaluate your Search Technique</head><p>Our platform has originally been devised to integrate a number of dedicated search techniques in a common infrastructure. However, it turned out that the very same functionality of a search engine can be used to evaluate the underlying search technique and its implementation. That is, experiments are typically carried out by running a well-defined set of queries against a set of candidates, and assessing quality and performance. Thus, we developed a framework that allows running predefined experiments against search engines without the need for a complex evaluation infrastructure. Two methods for evaluating a search technique are provided. Quality judges the relevance of matches in a search result with respect to a human assessment and therefore needs a reference data set. For similarity search, such a dataset has been introduced in <ref type="bibr">[3]</ref>. Performance addresses the resource consumption of the implementation of a search engine and its scalability with regard to large process model collections. Hence, performance can be evaluated without a human data set, using any process model collection.</p><p>For this purpose, a number of time measurements and counters are provided through an API, which can be used during the execution of a search run by a dedicated search engine. Features, such as support for laps and persistence of counters and timers over multiple search requests are available. All measurements are automatically included in the response of a query, along with statistical measures such as average, median, quantiles, standard deviation, etc.</p><p>To evaluate a search technique, we developed a web-based evaluation interface that allows choosing among a set of quality and performance evaluation methods, e.g., computation of precision and recall values and the visualization of precision-recall curves. 
With regards to performance measures, trend analyses can be plotted over a number of search runs for various sizes of the candidate model collection.</p><p>Fig. <ref type="figure" target="#fig_2">2</ref> shows an excerpt of the evaluation interface that allows choosing a dedicated search engine and computing precision and recall values for each of the queries. The result is provided in a table for each query, and a visualization shows the queries in a coordinate system. This allows for fast identification of queries with significantly good (right upper quadrant) or poor (left lower quadrant) quality. The search platform has been implemented as a two-tiered web application, consisting of an HTML5 frontend and a Java backend, depicted in Fig. <ref type="figure" target="#fig_5">3</ref>. The web-search and evaluation interfaces are implemented as web applications that run in common web browsers and require no installation. They communicate with the search platform via a JSON API and provide for the interaction with the user. For search, a simplistic process model editor based on the processWave editor<ref type="foot" target="#foot_1">2</ref> is provided to formulate the query. Search results are provided as an ordered list along with quality measures, cf. <ref type="bibr">[4]</ref>. Evaluation offers predefined experiments, i.e., a set of queries and candidates, run against a dedicated search engine and visualizes results in terms of quality and performance. Particular techniques to match a query with models from the process model repository are realized in dedicated search engines.</p><p>Search and Evaluation interfaces communicate with the platform server. As an evaluation comprises running reference searches, the platform server does not distinguish between search and experiment. That is, the experiment framework is implemented completely client-side. The search platform server integrates different dedicated search engines. 
Such an engine comprises at least a query processor that decides, whether a candidate matches the query and scores relevance. A custom index enables efficient search. To facilitate the implementation of these components, the search platform provides shared components that can be accessed by the components of a search engine, i.e., a model cache that underpins a custom index and a persistence interface with the repository that manages stored process models. The model cache increases startup speed as it preserves data that has been expensively precomputed, when models are loaded.</p><p>Through our strict use of a generic JSON API to access the search platform server it could also be used as a web service for process search and be integrated with other services or applications.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Maturity &amp; Show Case</head><p>The search platform has been implemented as a prototype to elaborate on the requirements on search engines and their integration with a common platform. Since dedicated search methods require their own query processor and indexes, such a platform provides users with a unique search interface that covers various perspectives of process model search, including similarity search and querying. At the current state, we have implemented similarity search and querying based on successor relations, cf. <ref type="bibr">[5,</ref><ref type="bibr">6]</ref>. As matching is conducted on net-based formalisms, search results are currently presented using their Petri net representations. This shall be extended in future work. Also, the quality experiments are limited to human assessment of similarity, cf. <ref type="bibr">[3]</ref>, as other reference data was not available.</p><p>In the demo, we address researchers that are interested in process model search and may even have proposed a search technique on their own. We will introduce the platform and its architecture in a brief presentation before turning to a live demo that comprises two parts.</p><p>1. We demonstrate the search and evaluation capabilities of the platform by means of example queries, and their results. This includes a discussion of search result quality metrics and how they support users in understanding a search result. For evaluation, we show how various measures and diagrams give insight into the quality and performance of a search technique. 2. In a quick walkthrough tutorial, we explain the requirements of a custom search engine and which steps are required to integrate it into the platform by a simple example. A screencast demonstrating our search and evaluation platform can be found at: http://vimeo.com/ress/process-search-demo. 
The platform is publicly available under the MIT open source license along with a short tutorial on how to use it and integrate a custom search engine at http://bitbucket.org/ress/ process-search.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">The Worklr Tool Approach</head><p>Our approach to capture knowledge workers' tacit knowledge is based on an ad-hoc workflow system which implements conversational acts, and where business goals are achieved when a set of data objects is produced <ref type="bibr">[1]</ref>. Knowledge workers are responsible to produce data objects towards the complete generation of the information required for the process goal achievement. Additionally, knowledge workers can establish conversational acts to request the production of data objects by their co-workers. Associated to a request, the initiator may include some input data objects, and the executor is expected to reply with output data objects that she produced to fulfill the request. The flow of conversational acts (i.e. requests), supported by the ad-hoc workflow system, is driven by the knowledge workers' knowledge of their own organizational responsibilities and their co-workers responsibilities: they know who is responsible to produce what.</p><p>Consider a Travel Process case where an employee wants to go on a business travel to Beijing. First, he creates a process instance named Travel Process.</p><p>As he initiates such process instance, the employee is automatically assigned to execute the first request of the process, a.k.a. root request. It is within this root request, automatically named after the process name, that the employee will create any necessary sub-requests.</p><p>The employee knows that the secretary is responsible to handle his trip, but before he sends her a sub-request, he creates a set of data objects that he knows the secretary will need. 
Therefore, the employee creates a data object labeled Destination and fills its value with Beijing, two data objects labeled Departure Date and Returning Date with the values 25/08/2013 and 30/08/2013, respectively, and a data object labeled Motivation where he states that he wants to attend the BPM2013 conference. For now, these four data objects are defined as the creation context of the request identified by the request label Travel Process <ref type="foot" target="#foot_2">3</ref> .</p><p>Having the necessary data objects, the employee is in condition to send a sub-request to the secretary. To do so, he creates a new sub-request, chooses the secretary's queue as destination, names the request Travel Request, adds a little description to communicate the motivation for that request as a commentary, and selects all the data objects he created before: Destination, Departure Date, Returning Date and Motivation. As the employee sends the sub-request, that request is published in the secretary's work queue.</p><p>As one secretary claims the request for execution, she is proclaiming herself as the executor of that request, being able to see the data objects provided by the request's initiator. Having the information provided by the employee, the secretary calls some travel agencies to request some hotel and flight prices. However, before booking any hotel room or flight, the secretary needs an authorization from the supervisor. Similarly to the employee, the secretary also knows that the supervisor needs to know the trip details and the best tender value she got from the agencies. As such, she creates a data object named Tender and fills it up with the best price she got from the travel agencies. 
Afterwards, the secretary creates a new sub-request, which she addresses to the supervisor's queue, labels the request Travel Authorization, writes a comment on what she needs from the supervisor, and, as input, she selects the data objects labeled Motivation, Departure Date, Returning Date, Destination and Tender.</p><p>In the same way, as the request is sent, it is published in the supervisor's queue. As one of the supervisors claims this request for execution, he can see the data objects that the secretary attached as input of the request. Based on this data, the supervisor executing the request decides whether or not to authorize the travel. To let the secretary know that she can proceed with the hotel and flight booking, the supervisor creates a new data object labeled Authorization and defines its value as Granted. Then, the supervisor is in condition to respond to the request and let the secretary know of his decision. Notice that the secretary will only see the data object if the supervisor explicitly selects it when he responds to the request. After the supervisor responds to the request, he cannot create any sub-request or data objects in that context<ref type="foot" target="#foot_3">4</ref> .</p><p>After the supervisor replies to the Travel Authorization request, the secretary can see the responded data object Authorization along with its Granted value. Based on that positive value, the secretary calls the travel agency and books the hotel and flight, obtaining the respective reservation number and flight ticket number. With this information, she creates two data objects labeled Hotel Reservation and Flight Number and fills in the respective information. 
After this, she is in condition to respond to the original request initiated by the employee, providing both the Hotel Reservation and Flight Number data objects.</p><p>As the secretary responds to the request, the employee can see the hotel and flight information contained in the data objects included in the response. Since he needs no more information, and has no pending or executing sub-requests, the employee completes the process.</p><p>With the process completed, the employee is essentially saying that the process attained its goal, which consists in getting the travel approved and both the hotel and flight information. During the process execution, several data objects were produced and drove the process instance. Worklr considers all the created data objects and stores their set of labels, along with their cardinality, as a business process goal. This means that completing the process depicted in the example above results in a process goal with the following eight data object labels:</p><formula xml:id="formula_0">-1 x Destination -1 x Motivation -1 x Departure Date -1 x Returning Date -1 x Tender -1 x Authorization -1 x Hotel Reservation -1 x Flight Number</formula><p>Having this process goal is important to guide future business process instances under the same label, i.e. process instances labeled Travel Process. Worklr knows, that if in the past a particular process goal was attained, it is likely that same goal will be attained again in a future execution of that kind of process. Hence, along with other contextual information gathered during the execution of a business process instance, Worklr uses such business process goal information and inspects the current state of the process to recommend the creation of new data objects or sub-requests. Such recommendations are based on a process goal and the current execution context, i.e. 
the user's roles, the labels of data objects available to the user, and the overall process' data object labels.</p><p>Therefore, apart from providing operational support, Worklr is also capable of storing previous executions in a structured way, which contain information to guide future process instances with the same label. Nevertheless, as new business situations occur (e.g. if the supervisor refuses the authorization), the Worklr ad-hoc aspect supports that change, i.e. it does not enforce recommendations, and stores the new attained process goal. That new process goal is taken into consideration in the following executions of processes labeled Travel Process.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.1">Features</head><p>The tool is in its first prototype stage, and its source code is not available yet due to intellectual property restrictions inherent to the PhD status of one of the authors.</p><p>The following list highlights a set of core functionalities of the Worklr application, regarding operational support:</p><p>-Create new business process instances.</p><p>-Create data objects within the execution of a request.</p><p>-Edit data objects created within the execution of a request.</p><p>-Create sub-requests and publish them into user and role queues.</p><p>-Claim requests received in the Inbox for execution.</p><p>-Communicate with the initiator of the executing request.</p><p>-Communicate with the executor of the sub-request.</p><p>-Cancel pending and executing sub-requests.</p><p>-List all sub-requests initiated and see their respective status (pending, executing, completed, canceled). -Provide data objects as input of sub-requests.</p><p>-Respond to executing requests, specifying which data objects should be included in the response. -Complete business process instances.</p><p>Apart from operational support, Worklr saves executions in the form of request templates, which are entities that abstract the different completed requests by storing some contextual information. A request template is therefore composed by:</p><p>an initiator role context, which is essentially the set of organizational roles played by the initiators of that kind of request. an input data context, which is the set of data object labels, and respective cardinality, of the data objects provided as input in that kind of request. a creation data context, which is also a set of data object labels, and respective cardinality, created in that kind of request. 
an output data context, which, analogously, is the set of data object labels, and respective cardinality, responded in that kind of request.</p><p>Along with all the business process goals attained under a particular process label, e.g. Travel Process, the Worklr tool provides a recommender system that analyses the context of execution of the current user, and computes a set of recommendations of both the labels of the data objects that should be created, and the labels of the sub-requests that can also be created. To each recommendation is associated a score, as discussed in <ref type="bibr">[2]</ref>, from which the list of recommendations is ordered accordingly.</p><p>Hence, the list of Worklr features is extended by this recommender system, and the tool can also:</p><p>-Provide sub-request recommendations based on the current execution context. -Provide data creation recommendations based on the current execution context.</p><p>As users perceive recommendations as useful, they are re-using the labels and behavior from other organizational parties playing a similar set of roles. Additionally, although the number of process, request and data object instances increase as more cases are handled, if recommendations are followed, the number of request templates and data object templates converges and recommendations become more accurate.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.2">Experiment</head><p>As shown in the screencast of the tool<ref type="foot" target="#foot_4">5</ref> , Worklr is in a functional state enough to conduct an experiment. In <ref type="bibr">[3]</ref>, we discuss the results of an experiment that we recently conducted, where we gathered some important feedback.</p><p>In the experiment, we had 13 participants and 1 confederate. The confederate would initiate new process instances and the root request, while the 13 participants were distributed across 4 distinct organizational roles that should cooperate to attain a particular business process goal. The business process used in the experiment was similar to the one exemplified here, but with more business rules.</p><p>A last comment on the experiment: we gathered some important results from the tool as we perceive some convergence in the use of labels by different participants. In the cold-start of the experiment, users would use different labels to identify requests and data objects, but after 16 business process instances, we identified some interesting convergence results.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Declarative Process Mining with the Declare Component of ProM</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Fabrizio Maria Maggi</head><p>University of Tartu, Estonia f.m.maggi@ut.ee</p><p>Abstract. Traditional process mining techniques mainly work with procedural modeling languages (like Petri nets) where all possible orderings of events must be specified explicitly. However, using procedural process models for describing processes working in turbulent environments and characterized by a lot of variability (like healthcare processes) is extremely difficult. Indeed, these processes involve several possible execution paths and representing all of them explicitly makes the process models quickly unreadable. Using declarative process models (like Declare) ensures flexibility in the process description. Even processes that work in environments where participants have more autonomy and are, therefore, more unpredictable can be represented as compact sets of rules.</p><p>In the process mining tool ProM, there are several plug-ins that can be used for different types of analysis based on Declare ranging from the discovery of Declare models, to conformance checking, to the online monitoring of running process instances.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Process discovery, conformance checking and process model enhancement are the three basic forms of process mining <ref type="bibr">[9]</ref>. In particular, process discovery aims at producing a process model based on example executions in an event log and without using any a-priori information. Conformance checking pertains to the analysis of the process behavior as recorded in an event log with respect to an existing reference model. Process model enhancement aims at enriching and extending existing process models with information retrieved from logs, e.g., a process model can be extended with performance-related information such as flow time and waiting time.</p><p>In the last years, several works have been focused on process mining techniques based on declarative process models <ref type="bibr">[2]</ref><ref type="bibr">[3]</ref><ref type="bibr">[4]</ref><ref type="bibr">[6]</ref><ref type="bibr">[7]</ref><ref type="bibr">[8]</ref>. These techniques are very suitable for processes characterized by high complexity and variability due to the turbulence and the changeability of their execution environments. The dichotomy procedural versus declarative can be seen as a guideline when choosing</p><p>The author would like to thank R.P. Jagadeesh Chandra Bose, Andrea Burattin, Massimiliano de Leoni and Luciano García-Bañuelos for their collaboration in the implementation of the plug-ins presented in this paper. the most suitable language to represent models in process mining algorithms: process mining techniques based on procedural languages can be used for predictable processes working in stable environments, whereas techniques based on declarative languages can be used for unpredictable, variable processes working in turbulent environments.</p><p>The plug-ins presented in this paper use the Declare notation <ref type="bibr">[10]</ref>. 
Declare is a declarative language with a formal semantics grounded in Linear Temporal Logic (LTL) on finite traces. A Declare model consists of a set of constraints which, in turn, are based on templates. Templates are abstract entities that define parameterized classes of properties, and constraints are their concrete instantiations.</p><p>In this paper, we illustrate how the presented plug-ins work by showing their application in the context of the BPI Challenge 2011 <ref type="bibr">[1]</ref> that records the treatment of patients diagnosed with cancer from a large Dutch hospital. This demonstrates that they are able to handle event logs of real-life size.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Plug-ins Description</head><p>Through the Declare Maps Miner, it is possible to generate from scratch a set of Declare constraints representing the actual behavior of a process as recorded in an event log. The user selects from a list of Declare templates the ones to be used for the discovery task, i.e., the mined model will contain only constraints that are instantiations of the selected templates. In addition, the user can provide the plug-in with some a-priori knowledge (if it is available), to guide the discovery task. For example, the user can decide to limit the possible constraints to discover. This allows her to generate only constraints that are the most interesting from the domain point of view, thus reducing the complexity of the resulting models.</p><p>There are two mechanisms for limiting the possible constraints to discover. First, it is possible to ignore constraints between event types (such as start and complete) of the same activity. <ref type="foot" target="#foot_5">1</ref> Secondly, the user can provide different groups of activities (each group can be loaded in the form of a list of activities in a text file) and specify if only intra-group or inter-group constraints should be considered. Intra-group constraints refer to the class of constraints where the activities involved all emanate from a single group. In many scenarios, analysts would be interested in finding constraints between activities pertaining to a functionality, to a particular department in an organization, etc. For example, in a hospital event log, an analyst would be interested in finding relationships/constraints between the various administration activities. Inter-group constraints refer to the class of constraints where the activities involved belong to two different groups. 
For example, in a hospital log, an analyst would be interested in constraints between activities involved in surgery and therapy.</p><p>The user can also specify thresholds for parameters minimum support and alpha. Parameter minimum support allows her to select the percentage of traces in which a constraint must be satisfied to be discovered (and to filter out noisy traces). Parameter alpha can be used to ignore constraints that are trivially true in the discovery task. For example, constraint response(A,B), specifying that if A occurs, then eventually B follows, is trivially true in process instances in which A does not occur at all. Fig. <ref type="figure" target="#fig_0">1</ref> <ref type="foot" target="#foot_6">2</ref> shows the output produced by the Declare Maps Miner. The discovery results are presented to the user as interactive maps and give different information about the process. Activities (shown as rectangles) are colored based on their frequency (from white indicating low frequency to yellow indicating high frequency). The user can highlight, in the discovered maps, the Declare constraints (shown as arcs between activities) that are more relevant (based on different metrics) or prune out the ones that are less interesting, redundant or deriving from noise in the log. The maps produced by the Declare Maps Miner also show delays, latencies, time distances between activities and several statistics related to the temporal dimension of the process.</p><p>If an existing Declare model is already available, it is possible to repair it based on the information retrieved from a log (plug-in: Repair a Declare Model ). The user can decide which aspects must be kept in the original set of rules and which ones can be discarded. For example, the user can decide to keep only certain types of constraints or constraints involving certain activities. 
It is possible to just remove less interesting or redundant rules or to strengthen (if possible) the constraints included in the original set. Starting from a Declare model and from a log, the Declare model can be extended with time information (plug-in: Extend a Declare Model with Time Information). Through the Data-Aware Declare Miner, it is also possible to discover constraints enriched with data conditions.  The Declare Replayer and the Declare Diagnoser (Declare Checker package) can be used to check the conformance of the actual behavior of a process as recorded in an event log with respect to a reference Declare model. The Declare Replayer generates a set of alignments between the log and the Declare model, i.e., information about what must be changed in the log traces to make them perfectly compliant with the model. The Declare Diagnoser projects the results obtained through the Declare Replayer onto the reference model (as shown in Fig. <ref type="figure" target="#fig_2">2</ref>). This projection produces a map in which the critical activities/constraints of the reference model are highlighted. In particular, activities are colored from red to green according to the number of moves they are involved in, i.e., according to how many times they were in the wrong place in the log or missing when required. Constraints are colored from red to green based on the number of moves that are needed to make the log compliant to them. Through the Declare Analyzer (Fig. 
<ref type="figure" target="#fig_5">3</ref>), the user can pinpoint where the process execution deviates from the reference Declare model and quantify the degree of conformance of the process behavior through several metrics, e.g., fulfillment ratio and violation ratio.</p><p>For completeness, we mention that, with Mobucon LTL (a provider of the operational support service in ProM), it is also possible to monitor a running process with respect to a reference Declare model and get information about its compliance at runtime. However, this functionality has already been presented in <ref type="bibr">[11]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Summary</head><p>The plug-ins belonging to the Declare component of ProM are advanced prototypes and, as demonstrated through their application to the log provided for the BPI challenge 2011, are able to handle logs of real-life size. The Declare Maps Miner and Mobucon LTL have been used in the Poseidon project (http://www.esi.nl/short/poseidon/) to monitor sea vessels. In particular, in <ref type="bibr">[5]</ref>, we show how it is possible to construct Declare models from real-life historical data and monitor live data with respect to the mined model.</p><p>We consider the set of ProM plug-ins presented here stable enough for evaluating the declarative approach in real-life case studies. Binaries and source code can be obtained from the ProM repository (https://svn.win.tue.nl/ repos/prom) and a release of each plug-in including the functionalities described in this paper is included in the nightly build version of ProM (www. processmining.org). A screencast of this demo can be found at http://math. ut.ee/ ~fabrizio/BPMdemo13/demo/demo.html. A flyer with a summary of the functionalities described in this paper is available at http://math.ut.ee/ ~fabrizio/BPMdemo13/demo/DeclareFlyer.pdf.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Leveraging Super-Scalarity and Parallelism to Provide</head><p>Fast Declare Mining without Restrictions Michael Westergaard 1,2 and Christian Stahl 1</p><p>1 Department of Mathematics and Computer Science, Eindhoven University of Technology, The Netherlands 2 National Research University Higher School of Economics, Moscow, 101000, Russia m.westergaard@tue.nl, c.stahl@tue.nl Abstract. UnconstrainedMiner is a tool for fast and accurate mining Declare constraints from models without imposing any assumptions about the model. Declare models impose constraints instead of explicitly stating event orders. Constraints can impose various choices and ordering of events; constraints typically have understandable names, but for details, refer to <ref type="bibr">[5]</ref>. Current state-of-the-art mining tends to fail due to a computational explosion, and employ filtering to reduce this. Our tool is not intended to provide user-readable models, but instead to provide all constraints satisfied by a model. This allows post-processing to weed out uninteresting constraints, potentially obtaining better resulting models than making filtering beforehand out of necessity. Any post-processing (and complexity-reducing filtering) possible with existing miners is also possible with the UnconstrainedMiner; our miner just allows more intelligent post-processing due to having more information available, such as interactive filtering of models. In our demonstration, we show how the new miner can handle large event logs in short time, and how the resulting output can be imported into Excel for further processing. Our intended audience is researchers interested in Declare mining and users interested in abstract characterization of relationships between events. 
We explicitly do not target end-users who wish to see a Declare model for a particular log (but we are happy to demonstrate the miner on other concrete data).</p><p>Processes that are not perfectly understood or have little structure are often more easily modeled using a declarative rather than a classical imperative approach. In a declarative approach, constraints between tasks are described rather than for each task specifying the next task to execute. Declarative modeling is a more recent and less mature approach, which has so far not found widespread application in industry yet. Declare <ref type="bibr">[7]</ref> is an emerging language studied in academia over the last decade.</p><p>Existing miners. Typically, the complexity of checking a constraint is $O(m \cdot \binom{n}{k} \cdot k!)$, where $m$ is the number of traces in the log, $n$ is the number of different events in the entire log, and $k$ is the number of parameters to the constraint (the number of ways to assign $n$ events to $k$ ordered parameters, ${}_nP_k = \binom{n}{k} \cdot k!$, times the number of traces $m$).</p><p>Support from the Basic Research Program of the National Research University Higher School of Economics is gratefully acknowledged.</p><p>results). We mine the same set of constraints as MINERful in 32 seconds (vs 26). This is done automatically, making it easy to add constraints by entering their LTL semantics.</p><p>Symmetry reduction. We observe that some Declare constraints are symmetric in their parameters; this is, for example, the case for the choice 1 of 2 constraint. By only checking these constraints for one ordering of parameters, we halve the number of checks (and time needed). In general, this reduces the checking complexity to $O\bigl(m \cdot \binom{n}{k} \cdot k! \,/\, \prod_{sg \in SG} |sg|!\bigr)$, i.e., divided by the factorial of the size of each symmetry group $sg \in SG$. The number we divide by tends to get larger exactly when the traditional approach has problems (i.e., when k is large). 
For example, the most complex standard Declare constraint is choice 1 of 5, which yields the aforementioned 67 billion checks for the BPI challenge log. As this constraint is symmetric in all parameters, this is reduced to 556,249,848 checks. Our implementation can mine all 34 Declare constraints for the BPI challenge log in 287 seconds (so the reduction allows us to check all constraints in the same amount of time we could only check the easy ones before). We now tie with MINERful at 26 seconds (the reason for less reduction is that MINERful does not handle any constraints where the reduction is large). For the standard constraints, we never have more than two symmetry groups, which means that as the number of parameters goes up, so does the reduction. We compute the symmetry reduction automatically, making this compatible with any added constraint.</p><p>Parallelization. As our miner is simple, we do not need any global computations so far. This means our miner is easy to parallelize. We can parallelize in the number of constraints or the number of traces. With 34 constraints, we can mine each individually. Declare constraints are not equal, however, and indeed the choice 1 of 5 constraint is responsible for more than 50% of the total time spent on the BPI challenge log, making this strategy less compelling. As we generate the parameters for the mining deterministically, we can have each computation thread take care of this on its own, alleviating this (but making mining more complex). A simpler approach, and the one we have chosen to go with, is to split the log and have each computation thread handle part of the log. As we have abolished a priori reduction, we do not need any central coordination in this case, and our overhead is so small (&lt; 1 second) we can just run the full mining process for each part of the log. Our results can be directly added, making mining scalable nearly infinitely (with constant overhead for preprocessing and aggregation). 
This has the added benefit of allowing each thread to use less memory (it only needs to store its own part of the log). Employing this allows us to mine all constraints of the BPI challenge log in 174 seconds (using two cores; the reason scalability is not completely linear is due to the feature Turbo Boost on modern CPUs allowing one core on a multi-core system to run at extra speed if other cores are inactive). We beat MINERful at 19 seconds (vs 26). We have implemented this in a parallel setting (i.e., multiple CPU cores on a single machine), but it is equally applicable in a distributed setting as synchronization is only necessary for aggregating results at the end.</p><p>Super-scalar mining. Using the automaton representation from <ref type="bibr">[6]</ref>, it is possible to check more than one constraint at a time. This comes at a cost of memory as the combined automaton is larger than the individual automata, sometimes even exponentially so, though in practise the size is always less than 1,000 states. We call this super-scalar mining, using the term from CPUs where it refers to the ability to execute multiple instructions simultaneously. We do not wish to break our symmetry reduction from before. Thus, we build a directed acyclic graph with constraints for nodes and arcs from one constraint to another if the first implies the second holds. We then group by taking each leaf and iteratively grouping it with all predecessors and successors not extending the set of parameters and not having incompatible symmetry groups (e.g., splitting them up so [[A], [B]] is not compatible with [[A, B]]-it would split up the symmetry group-but the other way around does hold-the symmetry group is already split). The rationale is that adding new parameters increases complexity as does splitting symmetry groups. 
Adding a constraint with larger symmetry groups does not increase complexity, while the constraint with larger symmetry groups could be checked more efficient on its own, adding to the super-scalar group comes at no extra cost. We check all such maximal groups. We compute this automatically and tailor it to any constraints selected and any new constraints added. Using super-scalar mining with parallelism, we can mine the entire BPI log in only 57 seconds and even without parallelism, we can mine the entire log in just 92 seconds. We beat MINERful on their constraints at just over 4 seconds, an improvement of a factor 6 over 26 seconds. On a server with 8 slower processing cores, MINERful still runs in 26 seconds, whereas UnconstrainedMiner runs in 30 seconds on the entire set of constraints. Using hyper-threading, the UnconstrainedMiner and MIN-ERful tie in running time with the UnconstrainedMiner mining more and more complex constraints. The gain obtained by super-scalar mining is independent of the log. Usage. In Fig. <ref type="figure" target="#fig_2">2</ref> (top) we see a screen-shot from the configuration of the mining process. All options are saved between runs, making mining from scratch a breeze; the number of threads is computed automatically as the number of cores in the local computer. We can manually inspect the LTL semantics and constraint automaton at the right. In Fig. <ref type="figure" target="#fig_2">2</ref> (bottom left) we see the tool in the process of mining. The top progress shows overall progress, and the next progress bars show overall and individual constraint progress for each computation thread. The text area is continuously updated with information about what each thread is doing. At Fig. <ref type="figure" target="#fig_2">2</ref> (bottom right), we see the end result along with total processing time. The end result is a list of all mined constraints, which can be imported in Excel for further processing, as shown in Fig. 
<ref type="figure" target="#fig_5">3</ref>.</p><p>Maturity, availability, screencast. Our miner is very new but so far very robust, and uses the same underlying library as the Declare tool (for representing automata) and ProM (for reading logs). The tool is written in Java and runs on all platforms supported by Java. We are currently employing the tool to construct hybrid models containing both declarative constraints and imperative modeling in a block-structured manner.</p><p>Our miner is available from tinyurl.com/declareminer along with source code. That page also contains a screen cast of simple use of the tool.</p><p>a modeler to define, analyze and maintain workflow models by using all of the workflow entities that are necessary to describe work procedures, and the enacting component supports users to play essential roles of invoking, executing and monitoring instances of the workflow model defined by the modeling component. Especially, from the organizational intelligence point of view, the modeling component deals with the planned (or workflow build-time aspect) knowledge of organizational resources allocations for workflow-supported operations, while on the other the enacting component concerns about the executed (or workflow run-time aspect) knowledge of organizational resources allotments for the workflow-supported operations. With being connected to these view-points, there might be two issues, such as discovery issue <ref type="bibr">[3]</ref> and rediscovery issues <ref type="bibr">[4]</ref>, in terms of the organizational knowledge discovery activities. In other words, the workflow knowledge discovery issue has something to do with exploring the planned knowledge from workflow models defined by the modeling component, and the workflow knowledge rediscovery issue is to explore the executed knowledge from the execution logs of the workflow models. 
Conclusively, the demo-system, wOISpaan, is able to discover, analyze, and visualize the planned knowledge of workflow performer-activity affiliations and allotments on a workflow model or a group of workflow models.</p><p>Availability of the System. The system's development environments are listed as follows. Particularly, we suppose that the XPDL workflow package's release version is XPDL 1.0. So, the system needs to be refurbished to support the recently released version of XPDL 2.0 or later, which reflects the BPMN<ref type="foot" target="#foot_10">4</ref> graphical constructs.</p><p>-Operating System: Windows 7 Ultimate 64bit -Programming Language: Java Development Toolkit v.6.0 -XPDL Version: XPDL 1.0 -Development Tool: Eclipse Indigo Release 2 -Libraries: Awt/Swing, Prefuse, Xpdl However, the system's execution environments are any types of operating systems, and the executable system is available on the website of the authors' research group, the collaboration technology research lab, at the department of computer science, Kyonggi University, https://ctrl.kyonggi.ac.kr/wois.html, and anyone can download the executable system and its demo workflow models in XPDL after registering as a member of the wOIS-paan's user group.</p><p>Use Cases and Features. The workflow performer-activity affiliation networking knowledge can be not only discovered from a workflow model defined by the modeling component, but also rediscovered from its execution event logs stored by the enacting component. In this demo-paper, we focus on the discovering issue of the workflow performer-activity affiliation networking knowledge from a workflow model. That is, the system's use cases are related to the discovering, analyzing, and visualizing features of the planned knowledge of performer-activity affiliations and allotments. 
The major use cases and their crucial features are listed as the followings:</p><p>-Discovery Use Case : Import XPDL-based workflow models or packages, Discover the wOIS-paan knowledge, and Generate the bipartite matrix from the discovered knowledge -Analysis Use Case : Calculate the degree centrality of each performer and each activity, and Measure the group-degree centrality of the corresponding workflow models (or packages) -Visualization Use Case : Visualize the graph nodes and edges between performer and activity in a graphical form of the force-directed-layout of the Prefuse toolkit The essential functional components being comprised of the system are bipartite matrix generation functionality <ref type="bibr">[2]</ref>, knowledge visualization functionality, and knowledge analysis functionality, and these components also can be systematically implemented by using the Java programming language. Fig. <ref type="figure" target="#fig_0">1</ref> illustrates a system architecture of the implemented wOIS-paan knowledge discovery system, which is made up of four groups of architectural components-wOISpaan Window-control, knowledge visualization, bipartite matrix generation, and knowledge analysis components. Particularly, the XPDL parser of the analysis components group takes charge of generating a performer-activity bipartite matrix from an XPDL-based workflow package 5 , and the social graph visualizer of the visualization components group depicts the wOIS-paan knowledge as a bipartite graph transformed from the bipartite matrix. In terms of the wOIS-paan knowledge analysis aspect, the system is theoretically backed up by the extended versions of the workload-centrality analysis equations <ref type="bibr">[1]</ref>, such as actor-degree centrality analysis equations and group-degree centrality analysis equations, so as to mathematically analyze a workflow performer-activity affiliation network model discovered from an XPDL-based workflow package.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Significance to the BPM field with a Case Study</head><p>As an operational example, we try to discover wOIS-paan knowledge from the XPDL-based pseudo-workflow packages arranged in Table <ref type="table">1</ref>. We suppose that there are two pseudo-workflow packages, each of which has two workflow models and three workflow models, respectively, and all fifty activities have been conducted by all of the sixteen performers. Consequently, the system is able to successfully discover a wOIS-paan knowledge from the pseudo-workflow packages, and visualize the discovered knowledge as shown in the captured-screen of Fig. <ref type="figure" target="#fig_2">2</ref>. In the visualized wOIS-paan knowledge as colored bipartite graph, boxes and circles imply performers and activities, respectively, and the bold-colored box and its linked circles represent the performer, Alan, and his affiliated 11 activities, such as α 1 , α 9 , α 10 , α 16 , α 21 , α 26 , α 33 , α 36 , α 39 , α 43 , α 50 . </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Conclusion</head><p>In this demo-paper, we suggested a possible way of projecting a special affiliation knowledge of the workflow-supported affiliation relations (involvement and participation behaviors) between workflow-based people and workflow-based activities by converging the social network techniques and the workflow discovering techniques. As a consequence of this suggestion, we have newly defined </p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. Example of an implemented instance of a business model.</figDesc><graphic coords="3,147.65,287.25,316.25,147.35" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>© 2013</head><label>2013</label><figDesc>VisionWaves B.V. All rights reserved.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Fig. 2 .</head><label>2</label><figDesc>Fig. 2. Example of a Process Manager Cockpit.</figDesc><graphic coords="5,152.50,303.95,283.20,156.60" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Fig. 1 :</head><label>1</label><figDesc>Fig. 1: Overview of CPN Tools with an example hybrid model for a hospital loaded.</figDesc><graphic coords="8,130.06,160.30,356.23,189.36" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Fig. 2 :</head><label>2</label><figDesc>Fig. 2: Two visualizations of the simple model from Fig. 1. The model itself is just visible below the visualizations.</figDesc><graphic coords="8,130.78,451.57,354.08,182.87" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Fig. 3 :</head><label>3</label><figDesc>Fig. 3: A colored Petri net model with an explicit process perspective (top) and (some of the) generated Java code from the model (bottom).</figDesc><graphic coords="9,129.37,449.67,356.62,186.78" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. Process engine architecture: classical (left) and proposed (right).</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head>Fig. 3 .</head><label>3</label><figDesc>Fig. 3. Data dependencies are a conservative extension to data format, parser, internal representation, and execution engine.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head>Fig. 1 :</head><label>1</label><figDesc>Fig. 1: Screenshot of the web-search interface.</figDesc><graphic coords="17,134.77,248.91,345.83,108.55" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_9"><head>Fig. 2 :</head><label>2</label><figDesc>Fig. 2: Screenshot of the interface for precision/recall analysis.</figDesc><graphic coords="18,134.77,256.51,345.83,171.62" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_10"><head>Fig. 3 :</head><label>3</label><figDesc>Fig. 3: Architecture of Process Model Search Platform</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_11"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. Screenshot from the Declare Maps Miner.</figDesc><graphic coords="27,194.73,115.83,225.90,139.95" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_12"><head>Fig. 2 .</head><label>2</label><figDesc>Fig. 2. Screenshot from the Declare Diagnoser.</figDesc><graphic coords="29,202.12,258.71,211.13,132.94" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_13"><head>Fig. 3 .</head><label>3</label><figDesc>Fig. 3. Screenshot from the Declare Analyzer.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_14"><head>Fig. 2 :</head><label>2</label><figDesc>Fig. 2: Screen shots of our miner running.</figDesc><graphic coords="34,114.47,487.65,249.90,179.90" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_15"><head>Fig. 1 .</head><label>1</label><figDesc>Fig. 1. Architectural Components of the wOIS-paan Knowledge Discovery System</figDesc><graphic coords="38,145.68,322.47,324.00,134.49" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_16"><head>Table 1 .</head><label>1</label><figDesc>Specifications of the XPDL-based Pseudo-workflow Packages</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_17"><head>Fig. 2 .</head><label>2</label><figDesc>Fig. 2. Visualization of the Discovered wOIS-paan Knowledge by the System</figDesc><graphic coords="40,181.68,115.91,252.06,235.45" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head>pk: cp_id fk: o_id III [new] Order [sent] pk: o_id Computer Manufacturer case object: Order</head><label></label><figDesc></figDesc><table><row><cell>Order</cell><cell>Order</cell><cell>Order</cell></row><row><cell>[received]</cell><cell>[rejected]</cell><cell>[confirmed]</cell></row><row><cell>pk: o_id</cell><cell>pk: o_id</cell><cell>pk: o_id</cell></row><row><cell></cell><cell>Check order</cell><cell>Create component list</cell><cell>Process order</cell></row><row><cell></cell><cell></cell><cell>Components</cell></row><row><cell></cell><cell></cell><cell>[created]</cell></row><row><cell cols="4">Fig. 2. Build-to-order Process of a Computer Manufacturer</cell></row></table></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">http://code.google.com/p/jbpt/</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_1">http://processwave.org</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_2">The root request is created automatically and named after the process name</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_3">Although this requirement appears to be limitative, its for correctness purposes</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="5" xml:id="foot_4">http://vimeo.com/user4862900/worklr-demo-bpm2013 -Password: BPM2013</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_5">The miner is able to deal with non-atomic activities and to discover constraints involving different parts of the activities' lifecycle.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_6">For space reasons, the quality of the screenshots here presented is not the best. However, all the figures included in this paper are available at http://math.ut.ee/ ~fabrizio/BPMdemo13/demo/FiguresDemo2013.zip</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_7">We use the 2012 log as the 2013 log is much simpler with 819 ≤ m ≤ 7554 and 3 ≤ n ≤</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_8">.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_9">A group of workflow models is defined as a workflow package in the WfMC's standardization terminology.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_10">BPMN stands for Business Process Modeling Notations, and it is released by OMG's BMI (Business Modeling &amp; Integration) Domain Task Force.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="5" xml:id="foot_11">The system is able to handle a group of XPDL-based workflow models as well as individuals of the workflow models.</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Acknowledgements. We thank Kimon Batoulis, Sebastian Kruse, Thorben Lindhauer, and Thomas Stoff for extending the Camunda modeler [4]  in the course of their master project to support the modeling of processes with respect to the concepts described in [9].</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgement</head><p>This work was supported by national funds through FCT - Fundação para a Ciência e a Tecnologia, under project PEst-OE/EEI/LA0021/2013.</p><p>Acknowledgement. This research was supported by the Basic Science Research Program (Grant No. 2012006971) through the National Research Foundation of Korea.</p></div>
			</div>


			<div type="funding">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Support from the Basic Research Program of the National Research University</head><p>Higher School of Economics is gratefully acknowledged. This research is supported by the Danish Agency for Science, Technology and Innovation through an industrial PhD Grant.</p></div>
			</div>

			<div type="annex">
<div xmlns="http://www.tei-c.org/ns/1.0"><p>For the BPI challenge log from 2012 3  <ref type="bibr">[1]</ref> the parameters are m = 13,087 and n = 24, yielding from 314,088 (k = 1) to 66,749,981,760 (k = 5) checks. ProM 6 has a Declare miner which systematically checks each constraint and returns a model comprising constraints satisfied for a certain percentage of traces. Due to the complexity, this miner employs a couple of tricks, including forcing users to pick among interesting constraints <ref type="bibr">[3]</ref>, employing a priori reduction to avoid considering rarely occurring events <ref type="bibr">[3]</ref>, and considering the relationships between constraints to avoid mining some constraints <ref type="bibr">[4]</ref>. Even so, the ProM Declare miner did not mine a single constraint such as succession in 24 hours.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>• Low risk</head><p>High risk</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>X X</head><p>Fig. <ref type="figure">1</ref>: Model with a branch.</p><p>The assumption that rarely occurring events need not be checked is crucially dependent on the notion of support, i.e., that a constraint is only interesting if it is triggered. This is problematic in a case such as Fig. <ref type="figure">1</ref>. There we have a hypothetical XOR-split and -join. If the top path is chosen, constraints in the bottom part are never triggered, so their support is low, even though those are arguably more interesting. If we want to mine without enforcing high support, we cannot use a priori reduction. Exploitation of relationships between constraints is problematic if we want to display a simpler model, e.g., by removing constraints redundant due to transitivity. The problem here is that, generally, support of a removed constraint cannot be derived from support of the remaining constraints.</p><p>The MINERful miner <ref type="bibr">[2]</ref> uses regular expressions and global computations to mine constraints. First statistics are computed for the log and subsequently constraints are mined from these. The MINERful miner can mine all constraints for the 2012 BPI challenge log in approximately 26 seconds, but only supports a subset of all constraints. Computation of constraints from statistics makes it difficult to add new constraints, as it is necessary to develop and prove rules for doing so. For example, it is far from obvious how to extend this approach to also mine choices.</p><p>Removing all assumptions. Instead, we prefer to mine first and, subsequently, filter with full information. Not only does this avoid the problem of support and branches, it allows us full knowledge of all constraints, so we can remove redundant constraints more intelligently using simple patterns. 
Finally, this allows us to provide users with a slider and let them interactively (and locally) balance between simplicity and support.</p><p>Efficient preprocessing. We first make sure that our base mining algorithm is efficient. We transform the log into a simple array of arrays of identifiers instead of actual events. Declare constraints have a (finite) LTL semantics, which can be represented as a finite automaton. Using a precomputed mapping between event identifiers and automaton labels, we obtain a fast and memory-efficient replay of the log. We can mine most of Declare's 34 constraints (excluding 2 with four or more parameters) on the BPI challenge log in 249 seconds using 12 MB of memory (including storing the log and all wOIS-paan -Discovering Performer-Activity Affiliation Networking Knowledge from XPDL-based Workflow Models</p><p>Hyun Ahn 1 , Minjae Park 2 , and Kwanghoon Pio Kim Abstract. In this demo-paper, we implement a workflow-supported organizational intelligence system, which is named as wOIS-paan. The major functionality of the current version of the system is to explore "workflow performer-activity affiliation networking knowledge" from an XPDLbased workflow model, and to visualize the knowledge in a graphical form of the force-directed-layout of the Prefuse toolkit. The implemented system operates under a series of algorithms discovering, analyzing, measuring, and visualizing workflow performer-activity affiliation networking knowledge from an XPDL-based workflow package 3 , which represents involvement and participation relationships, after all, between a group of performers and a group of activities. The eventual goal of the system is to measure and visualize the human resource allotments and contributions in enacting a workflow procedure (or a group of workflow procedures) at a glance. Also, in terms of the scalability of the system, it can be extensible to show the organization-wide workflow procedures. 
Conclusively, the wOIS-paan system ought to be a very valuable tool for the BPM and workflow design and operational performance analyzers and consultants.</p><p>Keywords: workflow-supported social networking knowledge, workflow affiliation networking knowledge, organizational knowledge discovery, workflow intelligence</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Maturity</head><p>In general, a workflow management system consists of two components, the modeling component and the enacting component. The modeling component allows</p></div>			</div>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Let&apos;s Go All the Way: From Requirements Via Colored Workflow Nets to a BPEL Implementation of a New Bank System</title>
		<author>
			<persName><forename type="first">H</forename><surname>Thomas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jeanne</forename><forename type="middle">G</forename><surname>Davenport</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Harris ; Aalst</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">B</forename><surname>Jørgensen</surname></persName>
		</author>
		<author>
			<persName><surname>Lassen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of OTM Conferences (1)</title>
				<meeting>of OTM Conferences (1)</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2005">2007. 2005</date>
			<biblScope unit="volume">3760</biblScope>
			<biblScope unit="page" from="22" to="39" />
		</imprint>
	</monogr>
	<note>Competing on Analytics</note>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Declarative event-based workflow as distributed dynamic condition response graphs</title>
		<author>
			<persName><forename type="first">T</forename><surname>Hildebrandt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">R</forename><surname>Mukkamala</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Post-proc.of PLACES</title>
				<imprint>
			<date type="published" when="2010">2010. 2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Nested dynamic condition response graphs</title>
		<author>
			<persName><forename type="first">T</forename><surname>Hildebrandt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">R</forename><surname>Mukkamala</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Slaats</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of Fundamentals of Software Engineering (FSEN)</title>
				<meeting>of Fundamentals of Software Engineering (FSEN)</meeting>
		<imprint>
			<date type="published" when="2011-04">April 2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Automatic Structure-Based Code Generation from Coloured Petri Nets: A Proof of Concept</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">M</forename><surname>Kristensen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of FMICS</title>
				<meeting>of FMICS</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2010">2010</date>
			<biblScope unit="page" from="215" to="230" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">CPN Tools 4: Multi-formalism and Extensibility</title>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of ATPN. LNCS</title>
				<meeting>of ATPN. LNCS</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2013">2013</date>
			<biblScope unit="volume">7927</biblScope>
			<biblScope unit="page" from="400" to="409" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Declare: A Tool Suite for Declarative Workflow Modeling and Enactment</title>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<ptr target="CEUR-WS.org" />
	</analytic>
	<monogr>
		<title level="m">Business Process Management Demonstration Track (BPMDemos</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<imprint>
			<date type="published" when="2011">2011. 2011</date>
			<biblScope unit="volume">820</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Mixing Paradigms for More Comprehensible Models</title>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Slaats</surname></persName>
		</author>
		<author>
			<persName><surname>Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of BPM. LNCS</title>
				<meeting>of BPM. LNCS</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2005">2013. 2005</date>
			<biblScope unit="volume">8094</biblScope>
			<biblScope unit="page" from="245" to="275" />
		</imprint>
	</monogr>
	<note>YAWL: Yet Another Workflow Language</note>
</biblStruct>

<biblStruct xml:id="b7">
	<monogr>
		<ptr target="https://www.activiti.org/" />
		<title level="m">Activiti: Activiti BPM Platform</title>
				<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<monogr>
		<ptr target="https://www.bonitasoft.com/" />
		<title level="m">Bonitasoft: Bonita Process Engine</title>
				<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<monogr>
		<ptr target="https://www.camunda.org/" />
		<title level="m">Camunda: Camunda BPM platform</title>
				<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Business artifacts: A data-centric approach to modeling business operations and processes</title>
		<author>
			<persName><forename type="first">D</forename><surname>Cohn</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Hull</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Data Eng. Bull</title>
		<imprint>
			<biblScope unit="volume">32</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="3" to="9" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<monogr>
		<ptr target="https://www.jboss.org/jbpm/" />
		<title level="m">JBoss: jBPM Process Engine</title>
				<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">PHILharmonicFlows: Towards a Framework for Object-aware Process Management</title>
		<author>
			<persName><forename type="first">V</forename><surname>Künzle</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Reichert</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Software Maintenance and Evolution: Research and Practice</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page" from="205" to="244" />
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Robust and flexible error handling in the aristaflow bpm suite</title>
		<author>
			<persName><forename type="first">A</forename><surname>Lanz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Reichert</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Dadam</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CAiSE Forum</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2010">2010. 2011</date>
			<biblScope unit="volume">72</biblScope>
			<biblScope unit="page" from="174" to="189" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Modeling and Enacting Complex Data Dependencies in Business Processes</title>
		<author>
			<persName><forename type="first">A</forename><surname>Meyer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Pufahl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Fahland</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Weske</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Business Process Management. LNCS</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2013">2013</date>
			<biblScope unit="volume">8094</biblScope>
			<biblScope unit="page" from="171" to="186" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Data-driven modeling and coordination of large process structures</title>
		<author>
			<persName><forename type="first">D</forename><surname>Müller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Reichert</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Herbst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">OTM 2007</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2007">2007</date>
			<biblScope unit="volume">4803</biblScope>
			<biblScope unit="page" from="131" to="149" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Business artifacts: An approach to operational specification</title>
		<author>
			<persName><forename type="first">A</forename><surname>Nigam</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">S</forename><surname>Caswell</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IBM Systems Journal</title>
		<imprint>
			<biblScope unit="volume">42</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="428" to="445" />
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">OMG: Business Process Model and Notation (BPMN)</title>
	</analytic>
	<monogr>
		<title level="j">Version</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="issue">0</biblScope>
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Flexibility in process-aware information systems</title>
		<author>
			<persName><forename type="first">M</forename><surname>Reichert</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Rinderle-Ma</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Dadam</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ToPNoC</title>
		<imprint>
			<biblScope unit="volume">5460</biblScope>
			<biblScope unit="page" from="115" to="135" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<monogr>
		<title level="m" type="main">Database System Concepts</title>
		<author>
			<persName><forename type="first">A</forename><surname>Silberschatz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">F</forename><surname>Korth</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Sudarshan</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2001">2001</date>
			<publisher>McGraw-Hill Book Company</publisher>
		</imprint>
	</monogr>
	<note>4th Edition</note>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">BPMN-Q: A Language to Query Business Processes</title>
		<author>
			<persName><forename type="first">A</forename><surname>Awad</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">EMISA</title>
		<imprint>
			<biblScope unit="volume">119</biblScope>
			<biblScope unit="page" from="115" to="128" />
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Managing Large Collections of Business Process Models-Current Techniques and Challenges</title>
		<author>
			<persName><forename type="first">R</forename><surname>Dijkman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">L</forename><surname>Rosa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">A</forename><surname>Reijers</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Comput Ind</title>
		<imprint>
			<biblScope unit="volume">63</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page">91</biblScope>
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Similarity of Business Process Models: Metrics and Evaluation</title>
		<author>
			<persName><forename type="first">R</forename><surname>Dijkman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Dumas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Dongen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Käärik</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Mendling</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Inform Syst</title>
		<imprint>
			<biblScope unit="volume">36</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="498" to="516" />
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">Evaluation Measures for Similarity Search Results in Process Model Repositories</title>
		<author>
			<persName><forename type="first">M</forename><surname>Guentert</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kunze</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Weske</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ER &apos;</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2012">2012</date>
			<biblScope unit="volume">12</biblScope>
			<biblScope unit="page" from="214" to="227" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Behavioral Similarity-A Proper Metric</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kunze</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Weidlich</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Weske</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">BPM &apos;11</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2011">2011</date>
			<biblScope unit="page" from="166" to="181" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Local Behavior Similarity</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kunze</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Weske</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">BPMDS &apos;1</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2012">2012</date>
			<biblScope unit="volume">113</biblScope>
			<biblScope unit="page" from="107" to="120" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">Petri Net Transformations for Business Processes-A Survey</title>
		<author>
			<persName><forename type="first">N</forename><surname>Lohmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Verbeek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Dijkman</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Transactions on Petri Nets and Other Models of Concurrency II</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2009">2009</date>
			<biblScope unit="page" from="46" to="63" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">Non-intrusive capture of business processes using social software</title>
		<author>
			<persName><forename type="first">D</forename><surname>Martinho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Silva</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">BPM Workshops</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2012">2012</date>
			<biblScope unit="page" from="207" to="218" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">A recommendation algorithm to capture end-users&apos; tacit knowledge</title>
		<author>
			<persName><forename type="first">D</forename><surname>Martinho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Silva</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Business Process Management</title>
		<imprint>
			<biblScope unit="volume">7481</biblScope>
			<biblScope unit="page" from="216" to="222" />
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<monogr>
		<title level="m" type="main">An experiment on capturing business process from knowledge workers</title>
		<author>
			<persName><forename type="first">D</forename><surname>Martinho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">R</forename><surname>Silva</surname></persName>
		</author>
		<idno type="DOI">10.4121/uuid:d9769f3d-0ab0-4fb8-803b-0d1120ffcf54</idno>
		<imprint>
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
	<note>submitted to the BPMS2 Workshop at BPM2013 References 1. 3TU Data Center. BPI Challenge 2011 Event Log</note>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">Techniques for a Posteriori Analysis of Declarative Processes</title>
		<author>
			<persName><forename type="first">A</forename><surname>Burattin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Sperduti</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">EDOC</title>
		<imprint>
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">Aligning event logs and declarative process models for conformance checking</title>
		<author>
			<persName><forename type="first">Massimiliano</forename><surname>Leoni</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Maria</forename><surname>Fabrizio</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Wilm</forename><forename type="middle">P</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><surname>Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">BPM</title>
		<imprint>
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<monogr>
		<title level="m" type="main">Runtime verification of LTL-based declarative process models</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Montali</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<monogr>
		<title level="m" type="main">Analyzing Vessel Behavior using Process Mining</title>
		<author>
			<persName><forename type="first">M</forename><surname>Fabrizio</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Arjan</forename><forename type="middle">J</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><surname>Mooij</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">P</forename><surname>Wil</surname></persName>
		</author>
		<author>
			<persName><surname>Van Der Aalst</surname></persName>
		</author>
		<imprint/>
	</monogr>
	<note>chapter Poseidon book</note>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">Efficient discovery of understandable declarative models from event logs</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">C</forename><surname>Bose</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CAiSE</title>
				<imprint>
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<analytic>
		<title level="a" type="main">A knowledge-based integrated approach for discovering and repairing declare maps</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">P J C</forename><surname>Bose</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CAiSE</title>
				<imprint>
			<date type="published" when="2013">2013</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">Monitoring business constraints with linear temporal logic: An approach based on colored automata</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Montali</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">BPM</title>
		<imprint>
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b37">
	<monogr>
		<title level="m" type="main">Process Mining: Discovery, Conformance and Enhancement of Business Processes</title>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2011">2011</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<monogr>
		<title level="m" type="main">Declarative Workflows: Balancing Between Flexibility and Support</title>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pesic</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Schonenberg</surname></persName>
		</author>
		<imprint/>
		<respStmt>
			<orgName>Computer Science -R&amp;D</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b39">
	<analytic>
		<title level="a" type="main">Declare: A tool suite for declarative workflow modeling and enactment</title>
		<author>
			<persName><forename type="first">Michael</forename><surname>Westergaard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Fabrizio</forename><forename type="middle">Maria</forename><surname>Maggi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">BPM (Demos)</title>
		<imprint>
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b40">
	<monogr>
		<title/>
		<idno type="DOI">10.4121/uuid:3926db30-f712-4394-aebc-75976070e91f</idno>
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b41">
	<analytic>
		<title level="a" type="main">A two-step fast algorithm for the automated discovery of declarative workflows</title>
		<author>
			<persName><forename type="first">C</forename><surname>Di Ciccio</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mecella</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CIDM 2013</title>
				<imprint>
			<publisher>IEEE</publisher>
			<date type="published" when="2013">2013</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b42">
	<analytic>
		<title level="a" type="main">Efficient discovery of understandable declarative process models from event logs</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">P J C</forename><surname>Bose</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CAiSE 2012. LNCS</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2012">2012</date>
			<biblScope unit="volume">7328</biblScope>
			<biblScope unit="page" from="270" to="285" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b43">
	<analytic>
		<title level="a" type="main">A knowledge-based integrated approach for discovering and repairing declare maps</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">P J C</forename><surname>Bose</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CAiSE 2013</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2013">2013</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b44">
	<monogr>
		<title level="m" type="main">Constraint-Based Workflow Management Systems: Shifting Controls to Users</title>
		<author>
			<persName><forename type="first">M</forename><surname>Pesic</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2008">2008</date>
			<pubPlace>Eindhoven</pubPlace>
		</imprint>
		<respStmt>
			<orgName>Beta Research School for Operations Management and Logistics</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">Ph.D. thesis</note>
</biblStruct>

<biblStruct xml:id="b45">
	<analytic>
		<title level="a" type="main">Better Algorithms for Analyzing and Enacting Declarative Workflow Languages Using LTL</title>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of BPM. LNCS</title>
				<meeting>of BPM. LNCS</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2011">2011</date>
			<biblScope unit="volume">6896</biblScope>
			<biblScope unit="page" from="83" to="98" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b46">
	<analytic>
		<title level="a" type="main">Declare: A Tool Suite for Declarative Workflow Modeling and Enactment</title>
		<author>
			<persName><forename type="first">M</forename><surname>Westergaard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Maggi</surname></persName>
		</author>
		<ptr target="CEUR-WS.org" />
	</analytic>
	<monogr>
		<title level="m">Business Process Management Demonstration Track (BPMDemos)</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<imprint>
			<date type="published" when="2011">2011. 2011</date>
			<biblScope unit="volume">820</biblScope>
		</imprint>
	</monogr>
	<note>mining results imported into Excel</note>
</biblStruct>

<biblStruct xml:id="b47">
	<monogr>
		<author>
			<persName><forename type="first">David</forename><surname>Knoke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Song</forename><surname>Yang</surname></persName>
		</author>
		<title level="m">Social Network Analysis - 2nd Edition, Series: Quantitative Applications in the Social Sciences</title>
		<imprint>
			<publisher>SAGE Publications</publisher>
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b48">
	<analytic>
		<title level="a" type="main">A Workflow Affiliation Network Discovery Algorithm</title>
		<author>
			<persName><forename type="first">Haksung</forename><surname>Kim</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ICIC Express Letters</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="765" to="770" />
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b49">
	<analytic>
		<title level="a" type="main">A Workflow-based Social Network Discovery and Analysis System</title>
		<author>
			<persName><forename type="first">Kwanghoon</forename><surname>Kim</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the International Symposium on Data-driven Process Discovery and Analysis</title>
				<meeting>the International Symposium on Data-driven Process Discovery and Analysis<address><addrLine>Campione d&apos;Italia, ITALY</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2011-07-01">June 29 - July 1, 2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b50">
	<analytic>
		<title level="a" type="main">Discovering Social Networks from Event Logs</title>
		<author>
			<persName><forename type="first">Wil</forename><forename type="middle">M P</forename><surname>Van Der Aalst</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Hajo</forename><forename type="middle">A</forename><surname>Reijers</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Minseok</forename><surname>Song</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">COMPUTER SUPPORTED COOPERATIVE WORK</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="issue">6</biblScope>
			<biblScope unit="page" from="549" to="593" />
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
