<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">EnnCore: End-to-End Conceptual Guarding of Neural Architectures</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Edoardo</forename><surname>Manino</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Danilo</forename><surname>Carvalho</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Yi</forename><surname>Dong</surname></persName>
							<email>yi.dong@liverpool.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">University of Liverpool</orgName>
								<address>
									<postCode>L69 3BX</postCode>
									<settlement>Liverpool</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Julia</forename><surname>Rozanova</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Xidan</forename><surname>Song</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mustafa</forename><forename type="middle">A</forename><surname>Mustafa</surname></persName>
							<email>mustafa.mustafa@manchester.ac.uk</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Andre</forename><surname>Freitas</surname></persName>
							<email>andres.freitas@manchester.ac.uk</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">imec-COSIC</orgName>
								<orgName type="institution">KU Leuven</orgName>
								<address>
									<addrLine>Leuven-Heverlee</addrLine>
									<postCode>B-3001</postCode>
									<country key="BE">Belgium</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="institution">Idiap Research Institute</orgName>
								<address>
									<postCode>1920</postCode>
									<settlement>Martigny</settlement>
									<country key="CH">Switzerland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Gavin</forename><surname>Brown</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mikel</forename><surname>Luján</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Xiaowei</forename><surname>Huang</surname></persName>
							<email>xiaowei@liverpool.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">University of Liverpool</orgName>
								<address>
									<postCode>L69 3BX</postCode>
									<settlement>Liverpool</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Lucas</forename><surname>Cordeiro</surname></persName>
							<email>lucas.cordeiro@manchester.ac.uk</email>
							<affiliation key="aff0">
								<orgName type="department">Department of Computer Science</orgName>
								<orgName type="institution">The University of Manchester</orgName>
								<address>
									<postCode>M13 9PL</postCode>
									<settlement>Manchester</settlement>
									<country key="GB">U.K</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">EnnCore: End-to-End Conceptual Guarding of Neural Architectures</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">3710C686270C6C14B2621D6A1E545F6E</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-24T08:42+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>The EnnCore project addresses the fundamental security problem of guaranteeing safety, transparency, and robustness in neural-based architectures. Specifically, EnnCore aims at enabling system designers to specify essential conceptual/behavioral properties of neural-based systems, verify them, and thus safeguard the system against unpredictable behavior and attacks. In this respect, EnnCore will pioneer the dialogue between contemporary explainable neural models and full-stack neural software verification. This paper describes existing studies' limitations, our research objectives, current achievements, and future trends towards this goal. In particular, we describe the development and evaluation of new methods, algorithms, and tools to achieve fully-verifiable intelligent systems, which are explainable, whose correct behavior is guaranteed, and robust against attacks. We also describe how EnnCore will be validated on two diverse and high-impact application scenarios: securing an AI system for (i) cancer diagnosis and (ii) energy demand response.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Deep neural networks (DNNs) are computing models typically deployed for classification, decision-making, and pattern recognition problems <ref type="bibr" target="#b4">(Bishop 2006)</ref>. Recently, various safety-critical tasks deployed DNNs, e.g., Covid-19 diagnosis <ref type="bibr" target="#b35">(Nour, Cömert, and Polat 2020)</ref> and steering control in self-driving cars <ref type="bibr" target="#b54">(Wu et al. 2021)</ref>. In these contexts, however, incorrect classifications can cause severe damages. It is well-known in the literature that adversarial disturbances can make DNNs misclassify objects, thus causing severe damage to users of safety-critical systems. For example, Eykholt et al. <ref type="bibr" target="#b14">(Eykholt et al. 2018</ref>) described that noise and disturbances, such as graffiti on traffic signals, could result in target misclassification during operation. Moreover, as DNNs are difficult to interpret and debug, the whole scenario becomes even more problematic <ref type="bibr" target="#b29">(Lundberg and Lee 2017)</ref>. Hence, there is a need for techniques to assess their structures and verify their results and behavior. Consequently, there is a growing interest in verification and interpretability methods for ensuring and explaining safety, accuracy, and robustness for DNNs.</p><p>According to a recent survey on the state-of-the-art of verification and synthesis methods for cyber-physical systems <ref type="bibr" target="#b9">(Cordeiro, de Lima Filho, and Bessa 2020)</ref>, most papers published in the area in the past ten years only study the verification of safety properties over mathematical representations of DNNs. However, a top-to-bottom verification process of DNNs will need to cover various aspects, including, for example, the external phenomena with which the DNN models interact and evolve. 
Thus, there is a considerable gap between low- and high-level models and between engineering and theoretical research efforts.</p><p>The EnnCore project 1 , which stands for "End-to-End Conceptual Guarding of Neural Architectures", aims to fill this gap. It has ambitious cross-cutting and far-reaching goals to provide a coherent and self-contained framework for specifying a conceptual safeguard core to neural-based (NB) Artificial Intelligence (AI) systems and verifying their actual implementations considering security aspects. Our setting draws on all of the above aspects: it covers the full range, from engineering details to abstraction and verification, and reasoning and explainability about model evolution and learning.</p><p>As a result, EnnCore addresses a fundamental research problem to ensure the security of neural-enabled components by taking into account their entire lifecycle from development to deployment. Solving this problem has a far-reaching impact on areas such as health and energy, which heavily depend on secure and trusted software components to meet safety-critical requirements. Hence, our overall research objective is to have a long-term impact on writing secure and trusted AI-based software components, thus contributing to a shared vision of fully-verifiable software, where underlying neural-based architectures are built with strong symbolic and mathematical guarantees.</p><p>To achieve this objective, EnnCore will design and validate a full-stack symbolic safeguarding system for NB architectures. It will advance the state-of-the-art in the development of secure DNN models by mapping, using, and extending explainability properties of existing neuro-symbolic DNN architectures (e.g., Graph Networks, Differentiable Inductive Logic Programming), thus safeguarding them with symbolic verification, abstract interpretation, and program synthesis methods. EnnCore will pioneer the interdisciplinary dialogue between explainable AI and formal verification. 
In particular, it will deliver safeguarding for NB architectures with the following properties: 1. Full-stack symbolic software verification: EnnCore will develop the first bit-precise and scalable symbolic verification framework to reason over implementations of DNNs, thereby providing additional guarantees of security properties concerning the underlying hardware. We will exploit state-of-the-art abstract interpretation and synthesis techniques to synthesize invariants to prune the state-space exploration and thus verify intricate security properties to ensure confidentiality, integrity, and availability. 2. Explainability/Interpretability: EnnCore will pioneer the integration of knowledge-based and neural explainability methods to support end-users specifying security constraints and diagnosing security risks in order to provide security assurances as NB models evolve. Attention will be given to the quantitative and qualitative characterization of semantic-drift phenomena in security scenarios. 3. Scalable: EnnCore will systematically combine contemporary symbolic methods for explaining, interpreting and verifying neural representations. In particular, we will develop a neuro-symbolic safeguard framework by linking the structural knowledge-based representation elements to the attentional architecture elements to achieve scalability and precision in an unprecedented manner. EnnCore will systematically validate the system using two different case studies from different domains: healthcare and energy, in order to achieve fully-verifiable intelligent systems that are explainable, ensure behavior correctness and are robust against unanticipated behaviors and attacks.</p><p>The remainder of the paper is organized as follows. In Section 2, we discuss the related work, including the limitation of existing studies. Section 3 describes a logical basis for proposing our approach as part of the EnnCore project, while Section 4 outlines our research objectives. 
Section 5 describes our current achievements broken down into four research areas to tackle the safety and security of NB architectures. Finally, we conclude and describe future work in Section 6.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Limitations of Existing Work</head><p>Explainable/Interpretable ML models. Doshi-Velez and Kim (Doshi-Velez and Kim 2017) define interpretability as "the ability to explain or to present in understandable terms to a human". Interpretability is an active area of machine learning. The recent Neuro-Symbolic (NS) architectures <ref type="bibr" target="#b16">(Garcez et al. 2019)</ref> inherit the strengths of deep learning models, while extending them with explainability and fine-grained/abstractive reasoning capabilities. NS models such as Graph Networks (GNs) <ref type="bibr" target="#b1">(Alshahrani et al. 2017</ref>) and Differentiable ILP <ref type="bibr" target="#b30">(Manhaeve et al. 2018</ref>) operate over and depend upon knowledge bases and focus on addressing inference problems which require relational reasoning and combinatorial generalization. No prior work has exploited: (i) the use of knowledge-based neuro-symbolic architectures for supporting end-users communicating their security constraints and (ii) the combination of explainability with symbolic verification to assure security properties. Verification of DNN Models. Verification of DNN models has attracted lots of attention recently, including unique approaches from formal verification <ref type="bibr" target="#b24">(Huang et al. 2017;</ref><ref type="bibr" target="#b25">Katz et al. 2017;</ref><ref type="bibr" target="#b28">Lomuscio and Maganti 2017;</ref><ref type="bibr" target="#b55">Wu et al. 2020</ref>), which deals with the problem through exhaustive search, SMT constraint solving, MILP constraint solving, and reduction to two-player game, respectively. A key problem remains on the scalability -the theoretical complexity of the verification problem is NP-complete either on the number of hidden neurons <ref type="bibr" target="#b25">(Katz et al. 2017)</ref> or the input dimensions <ref type="bibr">(Ruan, Huang, and Kwiatkowska 2018)</ref>. 
This pessimistic result has led to the consideration of approximation methods, such as abstract interpretation <ref type="bibr">(Gehr et al. 2018)</ref>, interval analysis <ref type="bibr" target="#b18">(Li et al. 2019)</ref>, and polynomial approximation <ref type="bibr" target="#b18">(Huang et al. 2019)</ref>. These methods provide soundness guarantees to the result but cannot ensure completeness. Such a relaxation on the guarantees can improve the scale of the network models that the methods can work with but still cannot reach the industrial-scale network models, even when GPU-based parallelisation is applied <ref type="bibr" target="#b41">(Ruan et al. 2019)</ref>. Besides, they are often restricted by the types of layers or activation functions they can work with.</p><p>The above observation has led to the development of the other thread of works called testing methods, which generate a large number of test cases to intensively test the existence of errors, such as <ref type="bibr" target="#b53">(Wicker, Huang, and Kwiatkowska 2018;</ref><ref type="bibr" target="#b47">Sun et al. 2018)</ref>. Furthermore, the generation of test cases may often be guided by the coverage metrics such as neuron coverage <ref type="bibr" target="#b37">(Pei et al. 2017)</ref> or MC/DC <ref type="bibr" target="#b46">(Sun et al. 2019)</ref>. While it is arguable whether the generated test cases are representative of the property to be verified, the testing results can be utilized to either understand the internal working mechanism <ref type="bibr" target="#b20">(Huang et al. 2021b</ref>) of neural network models or support safety argument <ref type="bibr" target="#b58">(Zhao et al. 2020a</ref>) together with the verification results. Please refer to a recent survey <ref type="bibr">(Huang et al. 2020)</ref>, or tutorial <ref type="bibr" target="#b42">(Ruan, Yi, and Huang 2021)</ref> for more discussions on the verification and testing techniques for neural network models. 
Verification of Actual Implementations of DNNs. While existing verification methods work with DNN models and adversarial examples (i.e., a small perturbation on a correctly-labeled input leads to a different classification), it has been pointed out in <ref type="bibr" target="#b36">(Odena et al. 2019</ref>) that there are errors in the Tensorflow graph representation of DNNs, a lower-level implementation of DNNs, such as numerical errors and disagreements between DNN implementations and their quantized versions. It is reasonable to believe that, when working with code-level implementations, e.g., on the Compute Unified Device Architecture (CUDA) and GPU hardware, there will be other errors, including security loopholes, that are more difficult to detect and mitigate than on CPU implementations <ref type="bibr" target="#b32">(Miele 2016;</ref><ref type="bibr" target="#b11">Di et al. 2020)</ref>.</p><p>Prior work focused on the verification of the robustness of the neural net with respect to its models <ref type="bibr" target="#b24">(Huang et al. 2017;</ref><ref type="bibr" target="#b25">Katz et al. 2017;</ref><ref type="bibr" target="#b47">Sun et al. 2018;</ref><ref type="bibr" target="#b59">Zheng et al. 2016)</ref>. In these approaches, off-the-shelf Satisfiability Modulo Theories (SMT) solvers are used to find robustness violations. However, this verification scheme cannot precisely capture issues that can be introduced in the implementations of DNNs. 
There exist four reasons: (i) one cannot model bit-level operations using the theory of integers and reals <ref type="bibr" target="#b8">(Cordeiro, Fischer, and Marques-Silva 2011)</ref>; (ii) libraries, such as TensorFlow, often take advantage of available Graphical Processing Units (GPUs) to explore the inherent parallelism of DNNs, so the translation to GPUs can be problematic; (iii) some security vulnerabilities cannot be detected in high-level models since they depend on implementation aspects (e.g., finite word-length); lastly (iv) there exists no connection between automated verification and explainability approaches, making it difficult to interrogate a system if something goes wrong.</p><p>Towards this, Pereira et al. <ref type="bibr" target="#b38">(Pereira et al. 2017)</ref> propose to verify CUDA programs written for GPU platforms with an SMT-based context-bounded checking technique. They developed ESBMC-GPU, which is the first verifier to discover adversarial cases and validate coverage methods in DNNs using the cuBLAS and cuDNN libraries <ref type="bibr" target="#b45">(Sena et al. 2019</ref>). However, Pereira et al. <ref type="bibr" target="#b38">(Pereira et al. 2017)</ref> do not exploit invariant inference to prune the state-space exploration for greater scalability. Also, their approach cannot explain the parameters of the DNN implementation to understand the root cause of errors.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Rationale and Approach</head><p>We believe that a holistic approach is necessary to overcome the challenges and limitations listed in Section 2. To this end, EnnCore will pioneer the dialogue between all the very different components of the contemporary AI safety stack (see Figure <ref type="figure" target="#fig_1">2</ref>).</p><p>On the one hand, we will draw inspiration and support from the diverse industrial experiences of our partners. For healthcare, digital Experimental Cancer Medicine Team (dECMT) 2 requires a provably correct, trusted, explainable 2 dECMT is a clinical digital research group based in the Cancer decision making for medical diagnosis. For energy, Urbanchain 3 requires a fair, explainable, and trusted decision making system to ensure the security and privacy of clients' data.</p><p>On the other hand, EnnCore will bridge the gap between the user's need to communicate their security constraints, and the technical challenges involved in formalising these constraints and checking whether neural-based systems satisfy them. In this respect, we consider explainability/interpretability techniques as a fertile common ground for translating the user's requirements to rigorous mathematical constraints. Furthermore, we believe that exploiting the structure of such constraints, and the neural-based architecture that is required to satisfy them, is the key towards a truly scalable full-stack verification approach.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Research Objectives</head><p>EnnCore aims to fundamentally shift the state-of-the-art of what is achievable in formal verification of AI-based software systems to make them secure and trusted against unanticipated behavior and attacks. We are convinced that this cannot be achieved by a "proof-of-concept implementation" with an artificial case study. This particular approach will not have much credibility -and thus impact -with systems and software engineers. We will work in close collaboration with industrial partners to tackle real-world case studies in healthcare and energy domains. We will also use real data and work with domain experts to develop and validate our algorithms, methods and tools. In a multidisciplinary fashion, EnnCore will link two areas, which include neuro-symbolic and explainable machine learning and software verification, to deliver a full-stack security mechanism for DNNs operating in safety-critical scenarios. Our core objectives are: O1: Develop a novel conceptual/symbolic safeguard mechanism for neuro-symbolic platforms EnnCore will pioneer the use of neuro-symbolic architectures and explainability/interpretability mechanisms to support end-users specifying a conceptual safeguard core to neural-based AI systems. The project will also contribute to a broad and in-depth systematic analysis of the impact of existing explainability/interpretability mechanisms in security scenarios. These mechanisms include the interpretation of high-dimensional embeddings, attentional mechanisms, decoding from intermediate representations and black-box debugging methods using artificially generated datasets. O2: Develop scalable SMT theories and invariant inference methods for DNNs EnnCore will develop new SMT theories to reason about the safety and security of actual implementations of DNNs. 
Our ultimate goal is to mitigate security vulnerabilities and incorrect predictions, which make AI-based applications susceptible to errors and mischance. Additionally, EnnCore will develop a new invariant inference method based on the structure of the DNNs. We aim to simplify the DNN output computation for some input intervals using abstract interpretation and program synthesis. In particular, we will exploit invariant inference to prune the state-space exploration for verifying security properties in real implementations of DNNs. O3: Grounding, deploying and evaluating high-impact real-world use cases EnnCore will be co-designed with industrial and clinical partners around exemplary use-case scenarios. The selected use cases reflect standard security requirements for DNNs, which are transferable to other sectors such as automotive and consumer electronics. Additionally, usability is at the center of the unique value proposition of EnnCore, where the model can interface with end-users (system designers and security experts). We will allow users to state areas within the model that should be safeguarded.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Current Achievements and Future Trends</head><p>The proposed research is broken down into four research areas: Real Case-Studies &amp; Integrated Evaluation, Explainable Neuro-Symbolic Safeguard Framework, Symbolic Verification Framework for AI, and Verifying Security in Embedded Software running in GPUs. In the following, we describe the research contents of each area. In particular, we provide details of what has been achieved to date and what we intend to tackle as future work.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.1">Real Case-Studies &amp; Integrated Evaluation</head><p>EnnCore aims to tackle two real-world use cases in two distinct domains: health and energy. In the health domain, the use case is cancer diagnoses (Lee et al. 2021), where a medical institution (e.g., hospital) aims to determine if suspect patients have cancer or not based on analyzing a set of biomarkers. To achieve this, the medical institution deploys an AI model that uses the patients' biomarkers to predict the likelihood of a patient having (or developing) cancer. In the energy domain, the use case is demand response <ref type="bibr" target="#b0">(Albadi and El-Saadany 2008)</ref>, where an energy supplier company aims to match their customers' energy consumption with the energy supply available, in order to facilitate peer-to-peer energy trading <ref type="bibr" target="#b7">(Capper et al. 2021</ref>) without violations of the grid constraints <ref type="bibr" target="#b13">(Dudjak et al. 2021</ref>). To complete this efficiently and effectively, the supplier needs to predict the half-hourly electricity consumption of each of their customers. To achieve this, the energy supplier deploys an AI model that uses their customers' historical consumption data to predict their consumption data for the next half-hourly time slot. Unfortunately, this approach allows the supplier to have access to households' fine-grained consumption data, which poses a high risk to users' privacy <ref type="bibr" target="#b34">(Mustafa, Cleemput, and Abidin 2016)</ref> as well as hinders the adoption of smart meters <ref type="bibr" target="#b6">(Briggs, Fan, and Andras 2020)</ref>.</p><p>Up to now, we have performed security analyses of both use cases to identify potential threats to the AI models, hence specifying concrete security requirements/properties that these AI models should satisfy. 
Apart from the 'standard' confidentiality, integrity, and availability requirements, we have identified the following properties relevant to AI models: robustness, transparency, auditability (traceability), accountability, and privacy.</p><p>Robustness ensures that AI models are resilient against malicious input and corner cases (a.k.a adversarial examples). Transparency ensures that all phases of an AI model processing chain (including the technical details of the models and the training data used) are well documented. Auditability (traceability) ensures that all the processing steps of the AI models (i.e., cause-effect) can be traced by third parties if needed. Finally, accountability ensures evidence of who has developed/managed/maintained every component/step of the AI model. These four properties are closely related to each other and contribute to the explainability of AI models. On the other hand, privacy ensures that sensitive user data and sensitive AI models are protected from unauthorized entities, sometimes even from the companies that have developed and managed the AI models.</p><p>To ensure AI models' robustness against malfunction and attacks, one promising approach is to adopt robust training for AI models <ref type="bibr">(Gehr et al. 2018)</ref>. This ensures that the AI models are already fed with data representing potential corner cases in the training phases. To achieve this, the training data is usually augmented by adding a certain degree of randomness. The DiffAI framework <ref type="bibr" target="#b33">(Mirman, Gehr, and Vechev 2018)</ref> has successfully applied this approach to develop AI models that are provably robust. This is achieved by deploying abstract interpretation techniques by overapproximating the AI system's behavior. However, the DiffAI framework has been designed to process images. 
As a next step, we plan to adapt the DiffAI framework to process other types of input data, e.g., biomarkers.</p><p>To ensure that AI systems protect user-sensitive data, deploying Federated Learning (FL) <ref type="bibr" target="#b5">(Bonawitz et al. 2019</ref>) is a promising approach. FL, by design, allows end-users to train their models locally, never share their sensitive raw data, yet benefit from the data of others. This is achieved by sharing only the gradients of the locally trained and deployed AI models, which are then aggregated to build a global model, distributed to the end-users. Although it already provides a good level of user privacy protection, this approach has some limitations. For example, the gradients of the locally trained AI models can reveal information about the model itself and/or the data used for the local training of the models <ref type="bibr" target="#b30">(Melis et al. 2019;</ref><ref type="bibr" target="#b10">De Cristofaro 2021)</ref>. In addition, a single global AI model does not always provide the best possible outcome for all the end-users. To address these limitations, we plan to deploy advanced cryptographic techniques for secure computation (homomorphic encryption and multiparty computation) to perform the gradient aggregation and devise the global model in a secure way such that no entity has access to the gradients provided by the individual end-users. In addition, to improve performance, we plan to adopt a clustering method <ref type="bibr" target="#b43">(Sattler, Müller, and Samek 2021)</ref>, which would classify the end-users based on their data into several clusters, creating variants of the global model, which will contribute differently to the final model used by each of the individual end-users. Our approach will be tested on the energy use case to predict individual users' household electricity consumption data.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.2">Safeguards for Explainable Neuro-Symbolic Inference</head><p>EnnCore sets the vision of delivering neural representation models that are highly controlled regarding their inference properties. The key concept is to allow model developers and domain experts to encode complex symbolic and geometric constraints within the models (safeguards), allowing for more controlled inference and better disentanglement. Additionally, the project expands emerging probing and metamorphic testing methodologies to measure and qualify the internal properties of the latent representation.</p><p>For this work-stream, we focus on designing controlled embeddings for complex tasks in Natural Language Processing (NLP), emphasizing textual entailment and question answering. These tasks allow for the design of models which require the encoding of complex (i.e., requiring multiple semantic operations) and multi-hop natural language inference in an explainable manner.</p><p>The inference control methods are represented as explicit linguistic and inference constraints, which elicit abductive inference biases that are integrated into the latent model. The intuition is that universal patterns of abstract inference, such as abstraction and fact unification <ref type="bibr" target="#b51">(Valentino, Thayaparan, and Freitas 2021;</ref><ref type="bibr" target="#b50">Valentino, Pratt-Hartman, and Freitas 2021)</ref> can be programmed into the model, prescribing an expected inference pattern, which can facilitate generalization but also enforce more consistent inferences. Following the results achieved by encoding these constraints using Integer Linear Programming (ILP) <ref type="bibr" target="#b49">(Thayaparan, Valentino, and Freitas 2020)</ref>, which demonstrate its positive impact on inference control and explainability, we proposed ∂-Explainer <ref type="bibr" target="#b48">(Thayaparan et al. 
2021)</ref>, an end-to-end differentiable architecture that integrates Convex Optimization such as Linear Programming with neural representations for abductive natural language inference. Specifically, we demonstrated that these models could integrate explicit inference constraints with Transformers-based sentence representations and train the architecture end-to-end to improve explanation generation and accuracy in multi-hop and abstractive reasoning tasks.</p><p>Part of the inference control mechanisms is expressed in the design of generative models for natural language inference with better disentanglement of latent factors. While representing the meaning of a sentence or an inference step in a continuous latent sentence space, models will aim for specializing latent dimensions to capture consistent linguistic and inference phenomena (e.g., tense variations for verbs), allowing for both interpretability and control. In <ref type="bibr" target="#b31">(Mercatali and Freitas 2021)</ref>, we proposed a variational autoencoder (VAE) model which better disentangles sentence discrete generative language factors. Recent work is expanding the same level of linguistic control via disentanglement for abstract sentences and multi-hop inference.</p><p>The level of additional control needs to be accompanied by methodologies that can measure and qualify the internal properties of these embedding spaces. For example, probing or diagnostic classification <ref type="bibr" target="#b17">(Hewitt and Liang 2019;</ref><ref type="bibr" target="#b15">Ferreira et al. 2021</ref>) is a method for investigating whether a set of intermediate (e.g., semantic) features are present in latent spaces. In EnnCore, we extend emerging methodologies such as metamorphic testing, geometric probing, and abstract inference to systematize the internal properties and consistency of controlled embedding spaces. 
Examples include the verification of abstract properties highly relevant to controlled inference such as monotonicity <ref type="bibr">(Rozanova et al. 2021)</ref> or variable substitution <ref type="bibr" target="#b15">(Ferreira et al. 2021)</ref>.</p><p>We also mention BayLIME <ref type="bibr" target="#b59">(Zhao et al. 2020b)</ref>, which is a novel explainable AI tool enhancing the well-known LIME tool with Bayesian reasoning to achieve better consistency in repeated explanations of a single prediction and better robustness to the hyper-parameters.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.3">Symbolic Verification Framework for AI</head><p>Verification refers to algorithms that determine whether or not a model satisfies some pre-specified property. Symbolic verification algorithms compute the intermediate results using a symbolic representation — such as BDD, SAT, and SMT. Usually, symbolic verification scales better than explicit verification, thanks to its memory efficiency and efficient computation. In the first year of EnnCore, we have explored a few directions on the symbolic verification techniques for AI, including working directly with the machine learning models. The other two directions aim to deal with the scalability issues through abstract models and acceptable safety of solutions, respectively.</p><p>We considered two classes of machine learning models when working directly with the models. For convolutional neural networks (CNNs), a symbolic verification algorithm based on interval analysis and symbolic layer-by-layer propagation was developed in <ref type="bibr" target="#b57">(Yang et al. 2021;</ref><ref type="bibr" target="#b27">Li et al. 2020)</ref>, together with a global optimisation based method <ref type="bibr" target="#b56">(Xu, Ruan, and Huang 2021)</ref>. Second, for the random forest, an SMT-based method was considered to determine whether a model has been data poisoned by a backdoor attack <ref type="bibr" target="#b21">(Huang, Zhao, and Huang 2020)</ref>.</p><p>We also develop methods when scalability is an obstacle to the verification algorithms. For example, for deep reinforcement learning, we abstract its interactive behavior with the environment into a discrete-time Markov chain and then apply an off-the-shelf probabilistic model checker to do verification <ref type="bibr" target="#b12">(Dong, Zhao, and Huang 2021)</ref>. 
For CNNs, we abstracted a model into a Bayesian network and then conducted probabilistic inference as the verification algorithm <ref type="bibr">(Berthier et al. 2021a,b)</ref>.</p><p>We also deal with scalability from the perspective of acceptable safety. In <ref type="bibr" target="#b19">(Huang et al. 2021a)</ref>, we developed a statistical certification algorithm for the robustness of CNNs, and in <ref type="bibr">(Zhao et al. 2021a,b)</ref>, we considered an acceptable level of reliability of CNNs. Moreover, coverage-guided testing is proven an effective way to quantify the quality of a recurrent neural network <ref type="bibr" target="#b20">(Huang et al. 2021b</ref>).</p><p>In addition to the above directions, we also developed our views in <ref type="bibr" target="#b42">(Ruan, Yi, and Huang 2021;</ref><ref type="bibr" target="#b22">Huang 2021)</ref>, which includes potential directions for exploration.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.4">Verifying Security in Embedded Software running in GPUs</head><p>We have developed and evaluated various verification strategies to detect errors in learning and classifications performed by DNNs. In particular, we analyzed potential failures of DNNs due to bugs in the implementation of the embedded software of the DNNs. Here, we distinguish two classes of bugs: 1) generic implementation errors, for instance, memory safety, arithmetic overflow, and division-by-zero; they can cause the implementation of the DNN to crash; 2) failure of the implementation to behave according to the high-level rules, which may cause misclassifications.</p><p>In <ref type="bibr" target="#b45">(Sena et al. 2019</ref><ref type="bibr" target="#b44">, 2021)</ref>, we develop and evaluate a novel symbolic verification framework using software model checking (SMC) and satisfiability modulo theories (SMT) to check for safety properties in quantized neural networks (QNNs). More specifically, we propose several QNN-related optimizations for SMC, including invariant inference via interval analysis, slicing, expression simplifications, and discretization of non-linear activation functions. We also quantified each technique's impact using different SMT solvers. We observed a significant performance improvement if we enabled slicing, interval analysis, and expression simplifications with the SMT solver Yices <ref type="bibr" target="#b44">(Sena et al. 2021)</ref>.</p><p>With this verification framework, we also provide formal guarantees on the safe behavior of QNNs implemented both in floating- and fixed-point arithmetic. In particular, we have observed that the verification time correlates with the number of bits used for ANN quantization. Interestingly, this correlation disappears for the number of bits above 14 due to the increasing state-space exploration. 
In this regard, our verification approach was able to verify and produce adversarial examples for 52 test cases spanning image classification and general machine learning applications. Furthermore, for small- to medium-sized QNNs, our approach completes most of its verification runs in minutes. In contrast to most state-of-the-art methods, our approach is not restricted to specific choices regarding activation functions and non-quantized representations. Finally, our experiments show that our approach can analyze larger ANN implementations and substantially reduce the verification time compared to state-of-the-art techniques that use SMT solving, e.g., Marabou <ref type="bibr" target="#b26">(Kim et al. 2016)</ref>. It is also competitive with verification approaches that employ symbolic interval analysis, e.g., Neurify <ref type="bibr" target="#b52">(Wang et al. 2018)</ref>.</p><p>As future work, we plan to work in two directions. First, we aim to evaluate security properties in various real case studies. Second, we will lead extensive experiments to validate the implementations of DNNs for a set of case studies from our industrial partners. This procedure requires us to set an environment for running the implementations of DNNs in typical GPUs, which will rely on our prior work on the verification of GPU programs <ref type="bibr" target="#b38">(Pereira et al. 2017)</ref>. Additionally, creating the benchmarks for the experiments is a continuous and iterative task, consisting of two main steps: (i) creating benchmarks using real applications of DNNs; and (ii) using industry-standard benchmarks in close collaboration with our partners. Lastly, we will interpret and validate the results obtained during these experiments and then compare our approach using other state-of-the-art verification tools, similar to our recent work <ref type="bibr" target="#b44">(Sena et al. 2021)</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Conclusions</head><p>EnnCore contributes to the development of trustworthy neural-based systems, which are highly applicable to areas of high societal impact, such as reliable infrastructure management, defense, medical diagnosis and treatment, and fair/unbiased decision making. In particular, EnnCore emphasizes safety for medical diagnosis and treatment, with a use case targeting cancer. Personalized medicine requires the increasing use of automated data-driven methods. The EnnCore project can directly impact the reduction of the barriers to adopting AI-based methods in clinical settings, thereby democratizing personalized cancer diagnosis and treatment. Additionally, one of our industrial partners is currently acting as a blockchain-based supplier in the energy market. EnnCore tools will ensure the privacy and security of the clients' data in the energy sector. In particular, EnnCore will assist this industrial partner by providing innovative methods to protect their customers' data and applied algorithms. As a result, we expect the EnnCore tools to push the state-of-the-art on formal verification and explainability techniques to provide assurances about AI applications' security and explain their security properties.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: EnnCore methodology.</figDesc><graphic coords="2,85.51,67.45,175.49,156.48" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: EnnCore holistic approach.</figDesc><graphic coords="3,381.96,67.45,113.57,163.15" type="bitmap" /></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" xml:id="foot_0">Research UK Manchester Institute (https://digitalecmt.org/).3 Urbanchain develops a world-leading platform for energy generators in the wholesale market (https://www.urbanchain.co.uk/).</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgment</head><p>The work is funded by EPSRC grant EP/T026995/1 entitled "EnnCore: End-to-End Conceptual Guarding of Neural Architectures" under Security for all in an AI enabled society. Prof. Luján is funded by an Arm/RAEng Research Chair award and a Royal Society Wolfson Fellowship.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">A summary of demand response in electricity markets</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">H</forename><surname>Albadi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">F</forename><surname>El-Saadany</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Electric power systems research</title>
		<imprint>
			<biblScope unit="volume">78</biblScope>
			<biblScope unit="issue">11</biblScope>
			<biblScope unit="page" from="1989" to="1996" />
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Neurosymbolic representation learning on biological knowledge graphs</title>
		<author>
			<persName><forename type="first">M</forename><surname>Alshahrani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">A</forename><surname>Khan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Maddouri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">R</forename><surname>Kinjo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Queralt-Rosinach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Hoehndorf</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Bioinformatics</title>
		<imprint>
			<biblScope unit="volume">33</biblScope>
			<biblScope unit="issue">17</biblScope>
			<biblScope unit="page" from="2723" to="2730" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<title level="m" type="main">Abstraction and Symbolic Execution of Deep Neural Networks with Bayesian Approximation of Hidden Features</title>
		<author>
			<persName><forename type="first">N</forename><surname>Berthier</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Alshareef</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sharp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Schewe</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<idno>CoRR, abs/2103.03704</idno>
		<imprint>
			<date type="published" when="2021">2021a</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<monogr>
		<title level="m" type="main">Tutorials on Testing Neural Networks</title>
		<author>
			<persName><forename type="first">N</forename><surname>Berthier</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<idno>CoRR, abs/2108.01734</idno>
		<imprint>
			<date type="published" when="2021">2021b</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Pattern recognition</title>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">M</forename><surname>Bishop</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Machine learning</title>
				<imprint>
			<date type="published" when="2006">2006</date>
			<biblScope unit="volume">128</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<monogr>
		<title level="m" type="main">Towards federated learning at scale: System design</title>
		<author>
			<persName><forename type="first">K</forename><surname>Bonawitz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Eichner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Grieskamp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Huba</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Ingerman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Ivanov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Kiddon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Konečnỳ</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Mazzocchi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">B</forename><surname>Mcmahan</surname></persName>
		</author>
		<idno>CoRR, abs/1902.01046</idno>
		<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<monogr>
		<title level="m" type="main">Privacy Preserving Demand Forecasting to Encourage Consumer Acceptance of Smart Energy Meters</title>
		<author>
			<persName><forename type="first">C</forename><surname>Briggs</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Fan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Andras</surname></persName>
		</author>
		<idno>CoRR, abs/2012.07449</idno>
		<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">A Systematic Literature Review of Peer-to-Peer</title>
		<author>
			<persName><forename type="first">T</forename><surname>Capper</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Gorbatcheva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">A</forename><surname>Mustafa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Bahloul</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Schwidtal</surname></persName>
		</author>
		<ptr target="https://ssrn.com/abstract=3959620" />
	</analytic>
	<monogr>
		<title level="m">Community Self-Consumption, and Transactive Energy Market Models</title>
				<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">SMTbased bounded model checking for embedded ANSI-C software</title>
		<author>
			<persName><forename type="first">L</forename><surname>Cordeiro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Fischer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Marques-Silva</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE TSE</title>
		<imprint>
			<biblScope unit="volume">38</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page" from="957" to="974" />
			<date type="published" when="2011">2011</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Survey on automated symbolic verification and its application for synthesising cyber-physical systems</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">C</forename><surname>Cordeiro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">B</forename><surname>De Lima Filho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">V</forename><surname>Bessa</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IET Cyber-Physical Systems: Theory and Applications</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="1" to="24" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">A critical overview of privacy in machine learning</title>
		<author>
			<persName><forename type="first">E</forename><surname>De Cristofaro</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE S&amp;P</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page" from="19" to="27" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Efficient Buffer Overflow Detection on GPU</title>
		<author>
			<persName><forename type="first">B</forename><surname>Di</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Li</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE TPDS</title>
		<imprint>
			<biblScope unit="volume">32</biblScope>
			<biblScope unit="issue">5</biblScope>
			<biblScope unit="page" from="1161" to="1177" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Dependability Analysis of Deep Reinforcement Learning based Robotics and Autonomous Systems</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Dong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Doshi-Velez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Kim</surname></persName>
		</author>
		<idno>CoRR, abs/1702.08608</idno>
	</analytic>
	<monogr>
		<title level="m">Towards a rigorous science of interpretable machine learning</title>
				<imprint>
			<date type="published" when="2017">2021. 2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Impact of local energy markets integration in power systems layer: A comprehensive review</title>
		<author>
			<persName><forename type="first">V</forename><surname>Dudjak</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Neves</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Alskaif</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Khadem</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Pena-Bello</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Saggese</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Bowler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Andoni</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Bertolini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhou</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Applied Energy</title>
		<imprint>
			<biblScope unit="volume">301</biblScope>
			<biblScope unit="page">117434</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Robust physical-world attacks on deep learning visual classification</title>
		<author>
			<persName><forename type="first">K</forename><surname>Eykholt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Evtimov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Fernandes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rahmati</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Xiao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Prakash</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Kohno</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Song</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CVPR</title>
				<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="1625" to="1634" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Does My Representation Capture X? Probe-Ably</title>
		<author>
			<persName><forename type="first">D</forename><surname>Ferreira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Rozanova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Thayaparan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Valentino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ACL-IJCNLP</title>
				<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="194" to="201" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Neural-symbolic computing: An effective methodology for principled integration of machine learning and reasoning</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">D</forename><surname>Garcez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Gori</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">C</forename><surname>Lamb</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Serafini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Spranger</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">N</forename><surname>Tran</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Gehr</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Mirman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Drachsler-Cohen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Tsankov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Chaudhuri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Vechev</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE S&amp;P</title>
		<imprint>
			<biblScope unit="page" from="3" to="18" />
			<date type="published" when="1905">2019. 1905. 2018</date>
		</imprint>
	</monogr>
	<note>AI2: Safety and robustness certification of neural networks with abstract interpretation</note>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Designing and Interpreting Probes with Control Tasks</title>
		<author>
			<persName><forename type="first">J</forename><surname>Hewitt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Liang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">EMNLP</title>
				<imprint>
			<date type="published" when="2019">2019</date>
			<biblScope unit="page" from="2733" to="2743" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">ReachNN: Reachability Analysis of Neural-Network Controlled Systems</title>
		<author>
			<persName><forename type="first">C</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Fan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Q</forename><surname>Zhu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ACM TECS</title>
		<imprint>
			<biblScope unit="volume">18</biblScope>
			<biblScope unit="issue">5s</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Statistical Certification of Acceptable Robustness for Neural Networks</title>
		<author>
			<persName><forename type="first">C</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Hu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Pei</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ICANN</title>
				<imprint>
			<date type="published" when="2021">2021a</date>
			<biblScope unit="page" from="79" to="90" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Coverage-Guided Testing for Recurrent Neural Networks</title>
		<author>
			<persName><forename type="first">W</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sharp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Meng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Trans. on Reliability</title>
		<imprint>
			<biblScope unit="page" from="1" to="16" />
			<date type="published" when="2021">2021b</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<monogr>
		<title level="m" type="main">Embedding and Extraction of Knowledge in Tree Ensemble Classifiers</title>
		<author>
			<persName><forename type="first">W</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<idno>CoRR, abs/2010.08281</idno>
		<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Safety and reliability of deep learning: (brief overview)</title>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">VARS</title>
		<imprint>
			<biblScope unit="page" from="1" to="2" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">A survey of safety and trustworthiness of deep neural networks: Verification, testing, adversarial attack and defence, and interpretability</title>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Kroening</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sharp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Thamo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Yi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computer Science Review</title>
		<imprint>
			<biblScope unit="volume">37</biblScope>
			<biblScope unit="page">100270</biblScope>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Safety verification of deep neural networks</title>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kwiatkowska</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">CAV</title>
		<imprint>
			<biblScope unit="page" from="3" to="29" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Reluplex: An efficient SMT solver for verifying deep neural networks</title>
		<author>
			<persName><forename type="first">G</forename><surname>Katz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Barrett</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">L</forename><surname>Dill</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Julian</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J</forename><surname>Kochenderfer</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">CAV</title>
		<imprint>
			<biblScope unit="page" from="97" to="117" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">CORONET; COVID-19 in Oncology evaluatiON Tool: Use of machine learning to inform management of COVID-19</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Kim</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Park</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Yoo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Choi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Shin</surname></persName>
		</author>
		<author>
			<persName><surname>Iclr</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wysocki ; Harrison</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Analyzing Deep Neural Networks with Symbolic Propagation: Towards Higher Precision and Faster Verification. In Static Analysis</title>
				<imprint>
			<date type="published" when="2016">2016. 2021. 2019</date>
			<biblScope unit="page" from="296" to="319" />
		</imprint>
	</monogr>
	<note>Compression of Deep Convolutional Neural Networks for Fast and Low Power Mobile Applications</note>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">Prodeep: a platform for robustness verification of deep neural networks</title>
		<author>
			<persName><forename type="first">R</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C.-C</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Xue</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Hermanns</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ESEC/FSE</title>
		<imprint>
			<biblScope unit="page" from="1630" to="1634" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<monogr>
		<title level="m" type="main">An approach to reachability analysis for feed-forward ReLU neural networks</title>
		<author>
			<persName><forename type="first">A</forename><surname>Lomuscio</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Maganti</surname></persName>
		</author>
		<idno>CoRR, abs/1706.07351</idno>
		<imprint>
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">A unified approach to interpreting model predictions</title>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">M</forename><surname>Lundberg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S.-I</forename><surname>Lee</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">NIPS</title>
				<imprint>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="4768" to="4777" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">Exploiting unintended feature leakage in collaborative learning</title>
		<author>
			<persName><forename type="first">R</forename><surname>Manhaeve</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Dumančić</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Kimmig</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Demeester</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>De Raedt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Melis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Song</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>De Cristofaro</surname></persName>
		</author>
		<author>
			<persName><surname>Shmatikov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE S&amp;P</title>
		<imprint>
			<biblScope unit="page" from="691" to="706" />
			<date type="published" when="2018">2018. 2019</date>
		</imprint>
	</monogr>
	<note>Deepproblog: Neural probabilistic logic programming</note>
</biblStruct>

<biblStruct xml:id="b31">
	<monogr>
		<title level="m" type="main">Disentangling Generative Factors in Natural Language with Discrete Variational Autoencoders</title>
		<author>
			<persName><forename type="first">G</forename><surname>Mercatali</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
		<idno>CoRR, abs/2109.07169</idno>
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<analytic>
		<title level="a" type="main">Buffer overflow vulnerabilities in CUDA: a preliminary analysis</title>
		<author>
			<persName><forename type="first">A</forename><surname>Miele</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Computer Virology and Hacking Techniques</title>
		<imprint>
			<biblScope unit="volume">12</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="113" to="120" />
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<analytic>
		<title level="a" type="main">Differentiable abstract interpretation for provably robust neural networks</title>
		<author>
			<persName><forename type="first">M</forename><surname>Mirman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Gehr</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Vechev</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ICML</title>
				<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="3578" to="3586" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">A local electricity trading market: Security analysis</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">A</forename><surname>Mustafa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Cleemput</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Abidin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ISGT Europe</title>
				<imprint>
			<date type="published" when="2016">2016</date>
			<biblScope unit="page" from="1" to="6" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<analytic>
		<title level="a" type="main">A novel medical diagnosis model for COVID-19 infection detection based on deep features and Bayesian optimization</title>
		<author>
			<persName><forename type="first">M</forename><surname>Nour</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Cömert</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Polat</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Applied Soft Computing</title>
		<imprint>
			<biblScope unit="volume">97</biblScope>
			<biblScope unit="page">106580</biblScope>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">Tensorfuzz: Debugging neural networks with coverage-guided fuzzing</title>
		<author>
			<persName><forename type="first">A</forename><surname>Odena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Olsson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Andersen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Goodfellow</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ICML</title>
				<imprint>
			<date type="published" when="2019">2019</date>
			<biblScope unit="page" from="4901" to="4911" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b37">
	<analytic>
		<title level="a" type="main">DeepXplore: Automated whitebox testing of deep learning systems</title>
		<author>
			<persName><forename type="first">K</forename><surname>Pei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Cao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Jana</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">SOSP</title>
				<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="1" to="18" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<analytic>
		<title level="a" type="main">SMT-based context-bounded model checking for CUDA programs</title>
		<author>
			<persName><forename type="first">P</forename><surname>Pereira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Albuquerque</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Da Silva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Marques</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Monteiro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Ferreira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Cordeiro</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Concurrency and Computation: Practice and Experience</title>
		<imprint>
			<biblScope unit="volume">29</biblScope>
			<biblScope unit="issue">22</biblScope>
			<biblScope unit="page">e3934</biblScope>
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b39">
	<monogr>
		<title/>
		<author>
			<persName><forename type="first">J</forename><surname>Rozanova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Ferreira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Thayaparan</surname></persName>
		</author>
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b40">
	<analytic>
		<title level="a" type="main">Reachability Analysis of Deep Neural Networks with Provable Guarantees</title>
		<author>
			<persName><forename type="first">M</forename><surname>Valentino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kwiatkowska</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IJCAI</title>
				<imprint>
			<date type="published" when="2018">2021. 2018</date>
			<biblScope unit="page" from="2651" to="2659" />
		</imprint>
	</monogr>
	<note>Supporting Context Monotonicity Abstractions in Neural NLI Models</note>
</biblStruct>

<biblStruct xml:id="b41">
	<analytic>
		<title level="a" type="main">Global Robustness Evaluation of Deep Neural Networks with Provable Guarantees for the Hamming Distance</title>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Kroening</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kwiatkowska</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">IJCAI</title>
				<imprint>
			<date type="published" when="2019">2019</date>
			<biblScope unit="page" from="5944" to="5952" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b42">
	<analytic>
		<title level="a" type="main">Adversarial Robustness of Deep Learning: Theory, Algorithms, and Applications</title>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Yi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ACM CIKM</title>
				<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="4866" to="4869" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b43">
	<analytic>
		<title level="a" type="main">Clustered Federated Learning: Model-Agnostic Distributed Multitask Optimization Under Privacy Constraints</title>
		<author>
			<persName><forename type="first">F</forename><surname>Sattler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K.-R</forename><surname>Müller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Samek</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE TNNLS</title>
		<imprint>
			<biblScope unit="volume">32</biblScope>
			<biblScope unit="issue">8</biblScope>
			<biblScope unit="page" from="3710" to="3722" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b44">
	<monogr>
		<title level="m" type="main">Verifying Quantized Neural Networks using SMT-Based Model Checking</title>
		<author>
			<persName><forename type="first">L</forename><surname>Sena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Song</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Alves</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Bessa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Manino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Cordeiro</surname></persName>
		</author>
		<idno>CoRR, abs/2106.05997</idno>
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b45">
	<analytic>
		<title level="a" type="main">Incremental Bounded Model Checking of Artificial Neural Networks in CUDA</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">H</forename><surname>Sena</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">V</forename><surname>Bessa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">R</forename><surname>Gadelha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">C</forename><surname>Cordeiro</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Mota</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">SBESC</title>
		<imprint>
			<biblScope unit="page" from="1" to="8" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b46">
	<analytic>
		<title level="a" type="main">Structural Test Coverage Criteria for Deep Neural Networks</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Kroening</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sharp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hill</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Ashmore</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ACM TECS</title>
		<imprint>
			<biblScope unit="volume">18</biblScope>
			<biblScope unit="issue">5s</biblScope>
			<biblScope unit="page" from="1" to="23" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b47">
	<analytic>
		<title level="a" type="main">Concolic testing for deep neural networks</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kwiatkowska</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Kroening</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">ASE</title>
		<imprint>
			<biblScope unit="page" from="109" to="119" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b48">
	<monogr>
		<title level="m" type="main">∂-Explainer: Abductive Natural Language Inference via Differentiable Convex Optimization</title>
		<author>
			<persName><forename type="first">M</forename><surname>Thayaparan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Valentino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Ferreira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Rozanova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
		<idno>CoRR, abs/2105.03417</idno>
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b49">
	<monogr>
		<title level="m" type="main">ExplanationLP: Abductive Reasoning for Explainable Science Question Answering</title>
		<author>
			<persName><forename type="first">M</forename><surname>Thayaparan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Valentino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
		<idno>CoRR, abs/2010.13128</idno>
		<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b50">
	<monogr>
		<title level="m" type="main">Do Natural Language Explanations Represent Valid Logical Arguments? Verifying Entailment in Explainable NLI Gold Standards</title>
		<author>
			<persName><forename type="first">M</forename><surname>Valentino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Pratt-Hartmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
		<idno>CoRR, abs/2105.01974</idno>
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b51">
	<analytic>
		<title level="a" type="main">Unification-based Reconstruction of Multi-hop Explanations for Science Questions</title>
		<author>
			<persName><forename type="first">M</forename><surname>Valentino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Thayaparan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Freitas</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">EACL</title>
				<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="200" to="211" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b52">
	<analytic>
		<title level="a" type="main">Efficient Formal Safety Analysis of Neural Networks</title>
		<author>
			<persName><forename type="first">S</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Pei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Whitehouse</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Jana</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">NeurIPS</title>
		<imprint>
			<biblScope unit="page" from="6369" to="6379" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b53">
	<analytic>
		<title level="a" type="main">Feature-guided black-box safety testing of deep neural networks</title>
		<author>
			<persName><forename type="first">M</forename><surname>Wicker</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kwiatkowska</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">TACAS</title>
		<imprint>
			<biblScope unit="page" from="408" to="426" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b54">
	<analytic>
		<title level="a" type="main">SDLV: Verification of Steering Angle Safety for Self-Driving Cars</title>
		<author>
			<persName><forename type="first">H</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Lv</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Cui</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Hou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Watanabe</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Kong</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Formal Aspects of Computing</title>
		<imprint>
			<biblScope unit="volume">33</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="325" to="341" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b55">
	<analytic>
		<title level="a" type="main">A game-based approximate verification of deep neural networks with provable guarantees</title>
		<author>
			<persName><forename type="first">M</forename><surname>Wu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wicker</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kwiatkowska</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Theoretical Computer Science</title>
		<imprint>
			<biblScope unit="volume">807</biblScope>
			<biblScope unit="page" from="298" to="329" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b56">
	<analytic>
		<title level="a" type="main">Towards the Quantification of Safety Risks in Deep Neural Networks</title>
		<author>
			<persName><forename type="first">P</forename><surname>Xu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Ruan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Complex and Intelligent Systems</title>
		<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b57">
	<analytic>
		<title level="a" type="main">Enhancing robustness verification for deep neural networks via symbolic propagation</title>
		<author>
			<persName><forename type="first">P</forename><surname>Yang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C.-C</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Zhang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Formal Aspects of Computing</title>
		<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="page" from="1" to="29" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b58">
	<analytic>
		<title level="a" type="main">A Safety Framework for Critical Systems Utilising Deep Neural Networks</title>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Banks</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sharp</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Robu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Flynn</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Fisher</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Safe-Comp2020</title>
		<imprint>
			<date type="published" when="2020">2020a</date>
			<biblScope unit="page" from="244" to="259" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b59">
	<analytic>
		<title level="a" type="main">Assessing the Reliability of Deep Learning Classifiers Through Robustness Evaluation and Operational Profiles</title>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Banks</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Cox</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Flynn</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Schewe</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Robu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Flynn</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Schewe</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Dong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Leung</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Goodfellow</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Detecting Operational Adversarial Examples for Reliable Deep Learning</title>
		<imprint>
			<date type="published" when="2016">2021a. 2020b. 2021b. 2016</date>
			<biblScope unit="page" from="4480" to="4488" />
		</imprint>
	</monogr>
	<note>CVPR</note>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
