<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Driver Monitoring Systems in Automated Interactions: A Realtime, Thermographic-based Algorithm</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Saifeddine</forename><surname>Aloui</surname></persName>
							<email>saifeddine.aloui@cea.fr</email>
							<affiliation key="aff0">
								<orgName type="institution" key="instit1">Univ. Grenoble Alpes</orgName>
								<orgName type="institution" key="instit2">CEA</orgName>
								<address>
									<addrLine>Leti</addrLine>
									<postCode>F-38000</postCode>
									<settlement>Grenoble</settlement>
									<country key="FR">France</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Raphaël</forename><surname>Morvillier</surname></persName>
							<email>raphael.morvillier@cea.fr</email>
							<affiliation key="aff0">
								<orgName type="institution" key="instit1">Univ. Grenoble Alpes</orgName>
								<orgName type="institution" key="instit2">CEA</orgName>
								<address>
									<addrLine>Leti</addrLine>
									<postCode>F-38000</postCode>
									<settlement>Grenoble</settlement>
									<country key="FR">France</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Christophe</forename><surname>Prat</surname></persName>
							<email>christophe.prat@cea.fr</email>
							<affiliation key="aff0">
								<orgName type="institution" key="instit1">Univ. Grenoble Alpes</orgName>
								<orgName type="institution" key="instit2">CEA</orgName>
								<address>
									<addrLine>Leti</addrLine>
									<postCode>F-38000</postCode>
									<settlement>Grenoble</settlement>
									<country key="FR">France</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Jaka</forename><surname>Sodnik</surname></persName>
							<email>jaka.sodnik@fe.uni-lj.si</email>
							<affiliation key="aff1">
								<orgName type="department">Faculty of Electrical Engineering</orgName>
								<orgName type="institution">University of Ljubljana</orgName>
								<address>
									<addrLine>Tržaška c. 25</addrLine>
									<postCode>1000</postCode>
									<settlement>Ljubljana</settlement>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="laboratory" key="lab1">Mind, Brain</orgName>
								<orgName type="laboratory" key="lab2">Behavior Research Center -CIMCYC</orgName>
								<orgName type="institution">University of Granada</orgName>
								<address>
									<addrLine>Campus de Cartuja s/n</addrLine>
									<postCode>18011</postCode>
									<settlement>Granada</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Carolina</forename><surname>Diaz-Piedra</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Francesco</forename><surname>Angioi</surname></persName>
							<email>frangioi@ugr.es</email>
							<affiliation key="aff2">
								<orgName type="laboratory" key="lab1">Mind, Brain</orgName>
								<orgName type="laboratory" key="lab2">Behavior Research Center -CIMCYC</orgName>
								<orgName type="institution">University of Granada</orgName>
								<address>
									<addrLine>Campus de Cartuja s/n</addrLine>
									<postCode>18011</postCode>
									<settlement>Granada</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Leandro</forename><forename type="middle">L Di</forename><surname>Stasi</surname></persName>
							<email>distasi@ugr.es</email>
							<affiliation key="aff2">
								<orgName type="laboratory" key="lab1">Mind, Brain</orgName>
								<orgName type="laboratory" key="lab2">Behavior Research Center -CIMCYC</orgName>
								<orgName type="institution">University of Granada</orgName>
								<address>
									<addrLine>Campus de Cartuja s/n</addrLine>
									<postCode>18011</postCode>
									<settlement>Granada</settlement>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Driver Monitoring Systems in Automated Interactions: A Realtime, Thermographic-based Algorithm</title>
					</analytic>
					<monogr>
						<imprint>
							<date/>
						</imprint>
					</monogr>
					<idno type="MD5">85A85BC4CA191F2316EFD57B3E0D95A5</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-25T07:18+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Driver state</term>
					<term>Workload</term>
					<term>Facial thermography</term>
					<term>Real-time algorithm</term>
					<term>Automated vehicle</term>
					<term>Sensoring and real-time information</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Due to the progressive shift of responsibility from the driver to the vehicle itself in automated vehicle technologies, driver-centered innovations represent a key point for its advance. The so-called Driver Monitoring Systems (DMS) are therefore increasingly gaining importance in this context. One of the main aims of DMS is to estimate the driver's arousal levels in order to infer their cognitive state and capabilities. Even though the scientific literature is riddled with useful psychophysiological indices to estimate arousal levels [1], nowadays, arousal estimation is based on broad, mostly blink/gaze-related, indices. The reason is that actual implementation of reliable sensors in a feasible system able to collect, analyze, and interpret measurements in real-life conditions is still an open challenge. One of the alternatives to signal different cognitive states is facial skin temperature [2][3]. Infrared sensors that monitor heat loss have been shown useful to track facial skin temperature that indicate arousal modulations while driving [2][3]. Such intensive, laborious work to extract and analyze temperature changes in some facial landmarks is not reasonable in real-life applications <ref type="bibr" target="#b1">[2]</ref>. Here, we present the preliminary results obtained with a new software able to track, in real-time, drivers' facial-skin temperature changes.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Due to the progressive shift of responsibility from the driver to the vehicle itself in automated vehicle technologies, driver-centered innovations represent a key point for its advance. The so-called Driver Monitoring Systems (DMS) are therefore increasingly gaining importance in this context. One of the main aims of DMS is to estimate the driver's arousal levels in order to infer their cognitive state and capabilities. Even though the scientific literature is riddled with useful psychophysiological indices to estimate arousal levels <ref type="bibr" target="#b0">[1]</ref>, nowadays, arousal estimation is based on broad, mostly blink/gaze-related, indices. The reason is that actual implementation of reliable sensors in a feasible system able to collect, analyze, and interpret measurements in real-life conditions is still an open challenge. One of the alternatives to signal different cognitive states is facial skin temperature <ref type="bibr" target="#b1">[2,</ref><ref type="bibr" target="#b2">3]</ref>. Infrared sensors that monitor heat loss have been shown useful to track facial skin temperature that indicate arousal modulations while driving. Such intensive, laborious work to extract and analyze temperature changes in some facial landmarks is not reasonable in real-life applications <ref type="bibr" target="#b1">[2]</ref>. Face landmarks extraction using color images has become of common use <ref type="bibr" target="#b3">[4]</ref> thanks to several libraries (e.g., Google's MediaPipe library <ref type="bibr" target="#b4">[5]</ref>). However, when applied to thermographic images, these libraries produce unsatisfactory results: the face is either not detected or the landmarks are not correctly aligned with the real face. Therefore, two main methods have been developed to perform landmark detection on thermographic images. 
The first method is to develop a dedicated system trained on annotated thermographic images [see 6]. This approach is still limited due to the lack of large thermographic databases. For example, Kopaczka and colleagues used a database containing 2,935 images <ref type="bibr" target="#b6">[7]</ref>. A database of this kind would not be useful for our data. In the present study, the drivers had to wear transparent face masks due to the COVID-19 pandemic. This made it harder to apply landmark detection on thermographic images where the mask was visible. Indeed, although the masks were transparent to visible light, they were not in the wavelength used to measure the temperature, therefore hiding part of the driver's face. The second method uses an additional color camera to detect the facial landmarks and transfers them on the thermographic image (this process of aligning images from different sources is often referred to as "image registration"). In previous studies, authors detected the edges in both color and thermal images and matched them to align the images <ref type="bibr" target="#b7">[8,</ref><ref type="bibr" target="#b8">9]</ref>. A simpler method is described in another work <ref type="bibr" target="#b9">[10]</ref>, based on an initial optical calibration between the two cameras. Goulart and colleagues used the same principle and added a post-processing step to enhance the transferred landmark position, based on a trained expert manual annotation <ref type="bibr" target="#b10">[11]</ref>. Here, we present the preliminary results obtained with a system based on this second method, able to track drivers' facial-skin temperature changes automatically after an initial calibration. It is a first step towards a fully automatic system, which could run in real-time in future vehicles. We present the principle of the system and analyze its performance. 
In a future work, we intend to show the usefulness of extracting the face temperature in an automated driving condition.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Material and methods</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.">Instruments</head><p>We used a sensorized driving simulator (Nervtech™ solution, see Figure <ref type="figure">1</ref>) running a SCANeR studio software (AVSimulation, v.DT2.5). Participants' facial skin temperature was constantly monitored with a thermographic camera (FLIR A325sc, with a resolution of 320 × 240, a NETD &lt; 50mK and an accuracy of ±2°C or ±2% of reading) synchronized with a color camera (infrared color camera, Intel® Realsense).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Figure 1:</head><p>The driving simulator employed in the study. Left, the simulator and its dome; right, the interior of the dome with the thermographic and color cameras on the top of the main screen.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.">Face temperature extraction algorithm</head><p>To extract participants' facial-skin temperature at specific locations, we developed an algorithm (Figure <ref type="figure" target="#fig_0">2</ref>) able to identify two facial landmarks (Points of Interest [POIs]), the tip of the nose and the forehead, as well as the background in a thermographic image. The solution was based on a dual camera setup (i.e., color camera and thermographic camera), with a spatial correspondence between the two. Specifically, the color camera output allows the extraction of the POIs using conventional facial landmarks extraction tools. Here, we used MediaPipe <ref type="bibr" target="#b4">[5]</ref>, the state-of-the-art landmark detection library. Once the POIs were detected, their positions were fitted into the thermographic camera output, using a geometric transformation <ref type="bibr" target="#b11">[12]</ref>. The algorithm uses a 3 × 3 transformation matrix (𝑇 𝑐𝑜𝑙𝑜𝑟→𝑡ℎ𝑒𝑟𝑚𝑎𝑙 ) to convert each POI position from the color camera spatial output to the thermographic spatial output. Each POI (landmark, 𝑙 𝑖 ) is defined by its coordinates in the color camera space (𝑙 𝑖 𝑐𝑜𝑙𝑜𝑟 ).</p><p>As detailed below, the coordinates in the thermographic camera space (𝑙 𝑖 𝑡ℎ𝑒𝑟𝑚𝑎𝑙 ) are obtained by multiplying 𝑙 𝑖 𝑐𝑜𝑙𝑜𝑟 by the transformation matrix 𝑇 𝑐𝑜𝑙𝑜𝑟→𝑡ℎ𝑒𝑟𝑚𝑎𝑙 (1). This transformation matrix is the multiplication of three matrices (2). The first describes a translation with coordinates [𝑡 𝑥 ,𝑡 𝑦 ] (3), the second describes a rotation around the center of the screen with angle 𝜃 (4) and the third describes a scaling with parameters [𝑠 𝑥 ,𝑠 𝑦 ] (5). Once the positions of the landmarks in the thermographic image space were found, the POIs temperature values were read in the image. 
Finally, we multiplied the result by the skin emissivity (0.98) to obtain the skin temperature.</p><formula xml:id="formula_0">𝑙 𝑖 𝑡ℎ𝑒𝑟𝑚𝑎𝑙 = 𝑇 𝑐𝑜𝑙𝑜𝑟→𝑡ℎ𝑒𝑟𝑚𝑎𝑙 𝑙 𝑖 𝑐𝑜𝑙𝑜𝑟<label>(1)</label></formula><formula xml:id="formula_1">𝑇 𝑅𝐺𝐵→𝑡ℎ𝑒𝑟𝑚𝑎𝑙 = 𝑇 𝑡𝑟𝑎𝑛𝑠 × 𝑇 𝑟𝑜𝑡 × 𝑇 𝑠𝑐𝑎𝑙𝑖𝑛𝑔<label>(2)</label></formula><formula xml:id="formula_2">𝑇 𝑡𝑟𝑎𝑛𝑠 = [ 1 0 𝑡 𝑥 0 1 𝑡 𝑦 0 0 1 ]<label>(3)</label></formula><formula xml:id="formula_3">𝑇 𝑟𝑜𝑡 = [ 𝑐𝑜𝑠(𝜃) −𝑠𝑖𝑛(𝜃) 0 𝑠𝑖𝑛(𝜃) 𝑐𝑜𝑠(𝜃) 0 0 0 1 ]<label>(4)</label></formula><formula xml:id="formula_4">𝑇 𝑠𝑐𝑎𝑙𝑖𝑛𝑔 = [ 𝑠 𝑥 0 0 0 𝑠 𝑦 0 0 0 1 ] (5)</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3.">Calibration</head><p>The described system first needed to be calibrated to determine the parameters of the transformation matrix 𝑇 𝑐𝑜𝑙𝑜𝑟→𝑡ℎ𝑒𝑟𝑚𝑎𝑙 : 𝑡 𝑥 , 𝑡 𝑦 , 𝜃, 𝑠 𝑥 and 𝑠 𝑦 . Filippini and colleagues used a similar set-up and performed the calibration using a custom checkerboard <ref type="bibr" target="#b9">[10]</ref>, a method we found to be less precise in our situation.</p><p>We therefore developed a dedicated calibration software. It allows an operator to visualize simultaneously the color and the thermographic camera outputs, as shown in Figure <ref type="figure">3</ref>.</p><p>Figure <ref type="figure">3</ref>: Calibration software interface. On the left, the color image with the landmarks detected thanks to MediaPipe <ref type="bibr" target="#b4">[5]</ref>. On the right, the thermographic image with the corresponding landmarks that the operator has to translate, rotate, and scale to correspond to the driver's face.</p><p>On the color image, the operator can inspect the landmark detection performed by MediaPipe. A thermographic image shows if these landmarks are transferred correctly. If the result is not satisfying, additional translations, rotations and scaling of the landmarks "mask" can be done manually with the mouse. These transformations are recorded by the calibration software to compute the matrix 𝑇 𝑐𝑜𝑙𝑜𝑟→𝑡ℎ𝑒𝑟𝑚𝑎𝑙 . The calibration software finally saves the conversion parameters in a dedicated file which is used by the extraction software to automatically detect the POIs on the thermographic output.</p><p>In our experiment, we repeated the calibration procedure for each driver to compensate for slight differences in the positions of the cameras and head among different drivers.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.4.">Experimental design</head><p>To test our algorithm, we designed a 2 (traffic vs. low-traffic Traffic density) × 2 (automated [ADL4] vs. manual [MD] Driving modality) within-participants experiment. Thirty-five expert drivers (mean age = 41.61 years, standard deviation = 6.26 years) drove along two virtual scenarios (∼ 20 minutes [min] each) with varying traffic density. In both scenarios, the participants performed 10 min in MD and 10 min in ADL4. The order of the traffic density and driving mode was randomly balanced across drivers. During ADL4, they were instructed to supervise the system. We expect the arousal level of the drivers to be modulated by these conditions, as the manual driving mode and the high traffic condition are more demanding than the autonomous one and the low traffic condition respectively.  In order to validate the proposed algorithm, we selected randomly one of the two 20-min recordings (high or low traffic) for each driver. Then, we extracted one pair of color and thermographic images each 20 seconds. We obtained 65 images per driver and 2,340 in total. We then developed an annotation software to manually extract the temperature on these images. For each image, we pointed at two landmarks: the driver's forehead and the driver's nose tip. Four trained annotators performed the same procedure on the 2,340 images. Then, we computed on each image the mean of the four annotated positions of the nose tip and the forehead to establish the reference location of the nose tip and the forehead. Finally, we extracted the temperature at these locations to define the reference temperature.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.5.">Validation method</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.6.">Statistical study on the obtained data</head><p>After removing the drivers on whom the algorithm performed worse (see section 3.1.2), we used the algorithm described in Section 2.2 to extract the face temperature of the remaining drivers (n = 28). In order to obtain more measurement points, the algorithm at this stage ran at a higher frequency compared to the validation phase: one every 2 sec instead of one every 20 sec. We were therefore able to remove extreme values (lower than 25°C and higher than 37°C) as well as the outliers by applying a moving median thresholding procedure. We finally took the mean of the remaining points on each of the four segments: High traffic -Manual driving, High traffic -Automated driving, Low traffic -Manual driving, Low traffic -Automated driving. This gave us four data points per driver that we later used in our statistical analysis.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Results</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.">Validation of the algorithm</head><p>We first analyzed the algorithm performance in terms of position error in the thermographic image, measured in pixels. We computed the position error between the algorithm's output and the mean values provided by the four annotators (see 2.5). We also compared the position error of the mean position error of each annotator with respect to the overall mean value. Then, we analyzed the consequences of the algorithm position error in terms of temperature error, measured in degrees Celsius (°C).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.1.">Position error</head><p>In Figure <ref type="figure" target="#fig_3">6</ref>, we present the errors' distributions of the algorithm and the annotators. As a reference, in our setup the nose tip measures approximately 10 x 10 pixels. When pointing at the nose, the algorithm performed worse than the annotators with respect to the mean of the annotators. The two main causes for high mismatches were landmarks estimation errors of MediaPipe and spatial correspondence errors due to head movements (head turning or bending). Surprisingly, the algorithm slightly outperformed the annotators on the forehead with respect to the mean of the annotators. Our interpretation is that for a human, it could be hard to define a precise location on a large area with no points of reference such as the forehead. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.2.">Temperature error</head><p>Figure <ref type="figure" target="#fig_4">7</ref> shows the errors of the final temperature values computed by the algorithm. On the forehead, the temperature gradient was low, so the temperature error resulting from the position error was small. On the nose, however, the temperature gradient was higher, so the temperature error was also much higher compared to the forehead. Interestingly, the temperatures computed at the positions annotated by one annotator are consistently smaller than the temperatures computed at the mean of the annotated positions. This is because the face temperature exhibits a local peak on the nose and one individual annotator is further from this peak than the mean position of the four annotators. Looking at Figure <ref type="figure" target="#fig_5">8</ref>, we see that the mean absolute error of the nose temperature highly depends on the driver (it goes up to 1.6 °C for some drivers). For the statistical study, we excluded the 6 participants with an absolute error higher than 0.8 °C. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Conclusion and future works</head><p>The present work describes the first results obtained with an algorithm for tracking a driver's facial skin temperature during driving interactions. The algorithm consistently and effectively tracked participants' facial-skin temperature without interfering with their driving tasks. We have analyzed the position and temperature errors and for some drivers, tracking the nose tip temperature remains a challenge. Future systems should improve both the initial landmarks detection and the landmark transfer. The latter could be achieved by measuring the distance between the cameras and the driver's face like previous studies <ref type="bibr" target="#b9">[10]</ref> or considering the face as a 3D shape. Also, a calibration-less process should be developed to be implemented in a real car. More analysis should be conducted before publishing the results of a statistical study based on this work.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Architecture of the temperature extraction algorithm</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: Experimental design. Each participant performed the tasks as it is illustrated above. The arrows indicate that traffic conditions and the driving mode were randomized across participants.</figDesc><graphic coords="4,175.25,345.78,244.44,105.60" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: Annotation software interface. Left side: the annotator selects the POI and the frames to be annotated. Right side: the annotator points at the location of the POI on the thermographic image (in this example, the forehead and the nose tip are already annotated).</figDesc><graphic coords="4,157.25,523.33,280.41,107.10" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 6 :</head><label>6</label><figDesc>Figure 6: Position error (a 2D distance in pixels) distribution for the forehead and the nose. At the top, we compared the algorithm to the mean of the annotators. At the bottom, we compared each annotator to the mean of the annotators.</figDesc><graphic coords="5,152.75,527.93,289.18,198.40" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 7 :</head><label>7</label><figDesc>Figure 7: Temperature error (in °C) distribution for the forehead and the nose. At the top, we compared the algorithm to the mean of the annotators. At the bottom, we compared each annotator to the mean of the annotators.</figDesc><graphic coords="6,152.75,204.28,289.18,198.40" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 8 :</head><label>8</label><figDesc>Figure 8: Algorithm mean absolute temperature error, for each driver (in °C).</figDesc><graphic coords="6,97.98,506.21,398.98,141.70" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0"><head></head><label></label><figDesc></figDesc><graphic coords="2,80.60,475.70,433.79,119.05" type="bitmap" /></figure>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Acknowledgements</head><p>This study was funded by the European Union's Horizon 2020 research and innovation programme under grant agreement No. 875597 -HADRIAN (Holistic Approach for Driver Role Integration and Automation Allocation for European Mobility Needs) project. This document reflects only the authors' view, the European Climate, Infrastructure and Environment Executive Agency (CINEA) is not responsible for any use that may be made of the information it contains. We thank Leila Maboudi (Polytechnic University of Turin, Italy) for her comments and assistance in language edition.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Hand-skin temperature response to driving fatigue: an exploratory study</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">L</forename><surname>Di Stasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Gianfranchi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Diaz-Piedra</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-030-50537-0_1</idno>
	</analytic>
	<monogr>
		<title level="m">HCI in Mobility, Transport, and Automotive Systems. Driving Behavior, Urban and Smart Mobility</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">H</forename><surname>Krömker</surname></persName>
		</editor>
		<meeting><address><addrLine>HCII; Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2020">2020. 2020</date>
			<biblScope unit="volume">12213</biblScope>
			<biblScope unit="page" from="3" to="14" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Nasal skin temperature reveals changes in arousal levels due to time on task: An experimental thermal infrared imaging study</title>
		<author>
			<persName><forename type="first">C</forename><surname>Diaz-Piedra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Gomez-Milan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">L</forename><surname>Di Stasi</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.apergo.2019.06.001</idno>
	</analytic>
	<monogr>
		<title level="j">Applied Ergonomics</title>
		<imprint>
			<biblScope unit="volume">81</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<monogr>
		<title level="m" type="main">Panasonic develops drowsiness-control technology by detecting and predicting driver&apos;s level of drowsiness</title>
		<ptr target="https://news.panasonic.com/global/press/data/2017/07/en170727-3/en170727-3.html" />
		<imprint>
			<date type="published" when="2017">2017</date>
		</imprint>
		<respStmt>
			<orgName>Panasonic Corporation</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<monogr>
		<title level="m" type="main">A review of facial landmark extraction in 2D images and videos using deep learning, big data and cognitive computing</title>
		<author>
			<persName><forename type="first">M</forename><surname>Bodini</surname></persName>
		</author>
		<idno type="DOI">10.3390/bdcc3010014</idno>
		<imprint>
			<date type="published" when="2019">2019</date>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page">14</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<monogr>
		<author>
			<persName><forename type="first">C</forename><surname>Lugaresi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Tang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Nash</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Mcclanahan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Uboweja</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hays</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C.-L</forename><surname>Chang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">G</forename><surname>Yong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W.-T</forename><surname>Chang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Hua</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Georg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Grundmann</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1906.08172</idno>
		<title level="m">MediaPipe: A framework for building perception pipelines</title>
				<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Thermal facial landmark detection by deep multi-task learning</title>
		<author>
			<persName><forename type="first">W.-T</forename><surname>Chu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y.-H</forename><surname>Liu</surname></persName>
		</author>
		<idno type="DOI">10.1109/MMSP.2019.8901710</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE 21st International Workshop on Multimedia Signal Processing (MMSP)</title>
				<imprint>
			<date type="published" when="2019">2019. 2019</date>
			<biblScope unit="page" from="1" to="6" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">A fully annotated thermal face database and its application for thermal facial expression recognition</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kopaczka</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Kolk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Merhof</surname></persName>
		</author>
		<idno type="DOI">10.1109/I2MTC.2018.8409768</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE International Instrumentation and Measurement Technology Conference (I2MTC)</title>
				<imprint>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page" from="1" to="6" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">ThermalWrist: Smartphone thermal camera correction using a wristband sensor</title>
		<author>
			<persName><forename type="first">H</forename><surname>Yoshikawa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Uchiyama</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Higashino</surname></persName>
		</author>
		<idno type="DOI">10.3390/s19183826</idno>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Thermal-to-visible face alignment on edge map</title>
		<author>
			<persName><forename type="first">L</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Zheng</surname></persName>
		</author>
		<idno type="DOI">10.1109/ACCESS.2017.2712159</idno>
	</analytic>
	<monogr>
		<title level="j">IEEE Access</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="page" from="11215" to="11227" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Facilitating the child-robot interaction by endowing the robot with the capability of understanding the child engagement: The case of mio amico robot</title>
		<author>
			<persName><forename type="first">C</forename><surname>Filippini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Spadolini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Cardone</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Bianchi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Preziuso</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Sciaretta</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Del Cimmuto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Lisciani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Merla</surname></persName>
		</author>
		<idno type="DOI">10.1007/s12369-020-00661-w</idno>
	</analytic>
	<monogr>
		<title level="j">International Journal of Social Robotics</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="page" from="677" to="689" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Visual and thermal image processing for facial specific landmark detection to infer emotions in a child-robot interaction</title>
		<author>
			<persName><forename type="first">C</forename><surname>Goulart</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Valadão</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Delisle-Rodriguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Funayama</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Favarato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Baldo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Binotte</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Caldeira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Bastos-Filho</surname></persName>
		</author>
		<idno type="DOI">10.3390/s19132844</idno>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<monogr>
		<title level="m" type="main">Linear Geometry</title>
		<author>
			<persName><forename type="first">R</forename><surname>Artzy</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1993">1993</date>
			<publisher>Dover Publications</publisher>
			<pubPlace>New York, NY</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
