<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">GazeHD: Towards Measuring Effect of Depth of Field Controlled by Eye Tracking in 3D Environments</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Marc</forename><forename type="middle">Anthony</forename><surname>Berends</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics</orgName>
								<orgName type="department" key="dep2">Natural Sciences and Information Technologies</orgName>
								<orgName type="institution">University of Primorska</orgName>
								<address>
									<settlement>Koper</settlement>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Jordan</forename><forename type="middle">Aiko</forename><surname>Deja</surname></persName>
							<email>jordan.deja@famnit.upr.si</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics</orgName>
								<orgName type="department" key="dep2">Natural Sciences and Information Technologies</orgName>
								<orgName type="institution">University of Primorska</orgName>
								<address>
									<settlement>Koper</settlement>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">De La Salle University Manila</orgName>
								<address>
									<country key="PH">Philippines</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Nuwan</forename><forename type="middle">T</forename><surname>Attygalle</surname></persName>
							<email>nuwan.attygalle@famnit.upr.si</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics</orgName>
								<orgName type="department" key="dep2">Natural Sciences and Information Technologies</orgName>
								<orgName type="institution">University of Primorska</orgName>
								<address>
									<settlement>Koper</settlement>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Matjaž</forename><surname>Kljun</surname></persName>
							<email>matjaz.kljun@upr.si</email>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics</orgName>
								<orgName type="department" key="dep2">Natural Sciences and Information Technologies</orgName>
								<orgName type="institution">University of Primorska</orgName>
								<address>
									<settlement>Koper</settlement>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Faculty of Information Studies</orgName>
								<orgName type="institution">Novo Mesto</orgName>
								<address>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Klen</forename><forename type="middle">Čopič</forename><surname>Pucihar</surname></persName>
							<affiliation key="aff0">
								<orgName type="department" key="dep1">Faculty of Mathematics</orgName>
								<orgName type="department" key="dep2">Natural Sciences and Information Technologies</orgName>
								<orgName type="institution">University of Primorska</orgName>
								<address>
									<settlement>Koper</settlement>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Faculty of Information Studies</orgName>
								<orgName type="institution">Novo Mesto</orgName>
								<address>
									<country key="SI">Slovenia</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">GazeHD: Towards Measuring Effect of Depth of Field Controlled by Eye Tracking in 3D Environments</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">5CFFB6D6F4DD621749A478E092F36EEA</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-25T07:18+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>depth of field</term>
					<term>eye tracking</term>
					<term>tunnel test</term>
					<term>unity</term>
					<term>3D game</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Depth of Field (DoF) has been used in 3D software to imitate realistic vision to improve immersion and depth perception on 2D displays. However, traditional methods of introducing DoF use fixed focus point which is usually located in the center of the screen. This may lead to unwanted blur that could affect user immersion and game satisfaction. In this paper, we present GazeHD, a dynamic DoF system that uses eye tracking in order to actively focus at the position of user gaze whilst blurring other parts of the screen based on geometry of 3D environment. We evaluate dynamic DoF by running a user study (𝑛 = 5) including a tunnel test and a 3D game demonstration. The results show DoF does not improve depth perception. This was true for both mouse controlled and eye tracking controlled DoF. However users perceived higher immersion which also persisted in complex 3D scenes such as high fidelity first person video games.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>In order to imitate realistic vision in 3D games, software can try to simulates the depth of field (DoF) effect. It is applied to the scene camera generating imagery where objects in the scene are either blurred or sharp. The amount of blur is dependent on the properties of the camera, focusing point, and the 3D geometry of the scene (i.e. the distance between the object and the camera). This kind of visual distortion is intrinsic to our vision system so its introduction to 3D graphics may lead to a higher immersion when experiencing such virtual environments.</p><p>However, the standard implementation of DoF commonly uses a fixed focal point that is positioned in the center of the screen. In this way objects in the center of the screen are always in focus as the user moves though the 3D environment. The correct focal length is calculated based on the distance between the observer(i.e. scene camera) and the scene center point (i.e. the intersection point between the camera raycast and the surface in front of the camera) <ref type="bibr" target="#b0">[1]</ref>. However, if a user wants to look at content that is away from the screen centre, such content may be invisible due to blur. This potentially breaks the immersion of the experience, as the image does not accommodate for where the user is looking, and thus fails to fully imitate realistic human vision. Besides breaking the illusion this may also have a negative effect on depth perception.</p><p>Several studies have explored depth perception in 3D virtual environments . In the study by Naceri et al. the authors studied users' depth perception in 3D virtual environments <ref type="bibr" target="#b1">[2]</ref>. They compared two different Virtual Reality (VR) systems: the head mounted devices and immersive wide screen displays. 
The comparison was done by presenting a virtual environment containing different objects and asking the participants to compare their depth. The objects shown were placed at different depths, however their size was modified so that they always appeared to be of the same size. To achieve this, the size of the object was changed according to the depth position. This was done to eliminate the apparent size effects that would serve as a depth cue. The results showed significant differences between the two devices and highlighted the distance misestimation phenomenon for head mounted devices. Other studies explored depth perception and immersion in the scope of stereoscopic 3D rendering <ref type="bibr" target="#b2">[3]</ref> and 3D controlled DoF in stereoscopic displays <ref type="bibr" target="#b3">[4]</ref>. Another study looked at the effect of DoF on immersion in 3D games <ref type="bibr" target="#b4">[5]</ref>.</p><p>Advances in low cost gaze-tracking technologies, such as the Tobii Eye tracker 5, make it possible to track human gaze at an affordable cost in close to real time. This makes it possible to build a dynamic DoF system in which the focus point moves with user gaze. In a study conducted by Mauderer et al. the authors explored dynamic DoF and showed that it can lead to an increase in the perceived realism and can contribute to the perception of ordinal depth. Furthermore it also improved the estimation of distance between objects, however the authors found this is limited in its accuracy <ref type="bibr" target="#b5">[6]</ref>.</p><p>In this paper we attempt to verify this previous result on dynamic depth and extend it by exploring if such an effect can also be observed in situations where 3D objects are used and where the user is experiencing complex 3D scenes, such as high fidelity first person video games. 
Within this context we want to find out: (1) whether eye tracking controlled DoF improves depth perception accuracy, and (2) whether eye tracking controlled DoF is preferred and offers higher immersion when compared to fixed and no DoF systems. To answer these questions we design and run a user study with 5 participants running two different tasks: a Tunnel Test and a 3D game called Spaceship Demo. The method, results and discussion are provided in sections hereafter.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Method</head><p>In this section we explain the method followed in the user study covering apparatus, task description, study design, study procedure and data collection techniques.</p><p>Apparatus: An application integrated with the Tobii 4C Eye Tracker tool was developed using Unity. We used a display with a resolution of 1920 px × 1080 px at a size of 53 cm × 30 cm, and with a refresh rate of 60 frames per second. The eye tracker scans and estimates the user's head position and gaze at a frequency of 90Hz. Throughout the experiment the user was sitting at the desk where mouse and keyboard were provided for interaction see Figure <ref type="figure" target="#fig_0">1</ref>.</p><p>Task and Study Design: We chose a within-subject design which has two independent variables: DoF mode and feedback. We compared three DoF modes: no DoF, mouse controlled DoF where the focus point moved with mouse pointer and eye tracking DoF where the focus point moved with gaze. In respect to feedback we compared conditions with and without feedback. The feedback was shown as text popup indicating if the user correctly completed the task. The feedback was included into the study design in order to explore learning effect. We were interested in finding out if users are capable of improving their performance when feedback is provided.</p><p>The dependent variables were score which indicates how many times the user successfully completed the task, total duration which indicates the total amount of time the user spent on the task and questionnaire response.</p><p>We run two tasks: Tunnel Test and Spaceship Demo. In the Tunnel Test the goal was to measure depth perception where the only depth cue is DoF. A 3D scene was generated showing two spheres placed at different depths. The size of spheres is scaled so that they appear of equal size forming symmetry inside a tunnel (see Figure <ref type="figure" target="#fig_0">1 left</ref>). 
The user was then asked to indicate which sphere was closer. This method has been previously used by <ref type="bibr" target="#b5">[6]</ref>, however within their experimentation they did not use untextured 3D objects (e.g. spheres), but instead used 2D surfaces with relatively complex textures.</p><p>The Spaceship Demo was built upon an open source game <ref type="bibr" target="#b6">[7]</ref>. We modified the game to enable all DoF modes. The game is a first person game controlled with a mouse and keyboard. The players are tasked to navigate through the 3D environment which helps them progress through a fixed story line. The story lasts for approximately 5 minutes, where the player can navigate and explore the virtual environment freely. In this task we only collected qualitative data. We composed questionnaires based on methods used in the works of <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b4">5,</ref><ref type="bibr" target="#b2">3]</ref>.</p><p>Participants and Study Procedure: We recruited 𝑛 = 5 university students as test subjects via convenience sampling. The study started with a brief explanation of the study goals and consent form approval. Participants were then sat in front of the computer. We then conducted the 5 point eye tracking calibration after which the first test (Tunnel Test) started. The participants were shown how to interact with the system after which the data capture started. In each DoF mode, the order of which was randomized and counterbalanced, the user repeated the task 20 times. We vary the difficulty of the task creating 4 levels. The higher the level the closer together are the two objects. This in theory makes it more difficult to figure out which object is closer. After completing the task the user answered a short questionnaire. Afterwards, the same process was repeated with feedback enabled. 
This meant that the users were informed about the correctness of their answer after each task repetition.</p><p>The final test was the Spaceship Demo test. The users played the game in each DoF mode, the order of which was randomized and counterbalanced. After completing the test the users filled in a questionnaire.</p><p>Data Collection: Throughout the Tunnel Test we collected task time and task completion score. At the end of each condition we also collected questionnaire responses. We inquired on the following topics: level of comfort, difficulty of estimating distance of objects, level of immersion and difficulty of navigating the scene. We followed the metrics and scales used in the study of <ref type="bibr" target="#b3">[4]</ref>. The results of the Tunnel Test show there is no significant learning in any of the conditions (see Figure <ref type="figure" target="#fig_1">2</ref> top left and top middle graphs). This is true for both no-feedback and feedback conditions. When observing the results of total duration (see Figure <ref type="figure" target="#fig_1">2</ref> top right) we see the users performed the task faster in the no DoF condition compared to mouse and eye tracking controlled DoF conditions. The results for task performance (see Figure <ref type="figure" target="#fig_1">2</ref> bottom row) show that none of the modes managed to consistently outperform the random selection. Furthermore there is no clear distinction in quantitative performance between the three modes we compared. The qualitative results collected in the form of responses to the questionnaires showed that users think the most compelling depth is available in the eye tracking controlled DoF condition, however the difference is very small compared to the no DoF condition. Furthermore, the no DoF condition was chosen as the most popular mode.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Results</head><p>In the Spaceship demo, the mean rating for navigation of the 3D environment, is highest for mouse controlled DoF followed closely by no DoF condition. Eye tracking controlled DoF has the highest mean ratings in questions 2 and 4, regarding the viewing comfort and level of immersion, respectfully. The rankings of the conditions in the Spaceship Test from best to worst, according to participants show that eye tracking controlled DoF was voted highest.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Discussion and Conclusion</head><p>In this research we explored the effects of eye tacking controlled DoF in 3D environments, compared to manually(mouse) controlled DoF. By using eye tracking controlled DoF, we keep the gaze point in focus which in turn imitates real life vision. We designed an experiment that measured both depth perception accuracy, and subjective preference for different aspects of 3D environments. We failed to find evidence the DoF improves depth perception. This was true for both mouse controlled DoF and eye tracking controlled DoF. However when considering users preferences our research shows that DoF can increase immersion. Furthermore we show this is true also true in complex 3D scenes, such as high fidelity first person video games. However, it is important to note that this study is limited with the number of participants, which prevented us from running statistical tests. Therefore these findings are of preliminary nature and should be corroborated by extending the user base.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Preview of setup: (Left) Tunnel Test and (Right) Spaceship Demo. Participants were seated at a desk and aligned in a way that their eyes steadily remained at around 42cm from the display.</figDesc><graphic coords="3,89.29,18.03,416.62,268.86" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Tunnel Test Results: (Top left) mouse controlled and eye tracking controlled DoF scores without feedback across all 20 trials. Y-axis shows consecutive number of test repetition indicating the flow of time and highlighting if any learning happened over time. (Top middle) mouse controlled and eye tracking controlled DoF scores with feedback. (Top Right) average duration of task in each condition. (Bottom Left) total scores per condition. The black line shows baselines performance of random selection. (Bottom Right) total score per difficulty level. The black line again shows baselines performance of random selection.</figDesc><graphic coords="4,108.88,260.85,375.01,210.95" type="bitmap" /></figure>
		</body>
		<back>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Depth-of-field blur effects for first-person navigation in virtual environments</title>
		<author>
			<persName><forename type="first">S</forename><surname>Hillaire</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Lécuyer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Cozot</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Casiez</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of ACM VRST</title>
				<meeting>of ACM VRST</meeting>
		<imprint>
			<date type="published" when="2007">2007</date>
			<biblScope unit="page" from="203" to="206" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Depth perception within virtual environments: Comparison between two display technologies</title>
		<author>
			<persName><forename type="first">A</forename><surname>Naceri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Chellali</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Dionnet</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Toma</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">International Journal On Advances in Intelligent Systems</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Enhancing 3d applications using stereoscopic 3d and motion parallax</title>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">K</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">M</forename><surname>Peek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">C</forename><surname>Wünsche</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Lutteroth</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the AUIC</title>
				<meeting>of the AUIC</meeting>
		<imprint>
			<date type="published" when="2012">2012</date>
			<biblScope unit="page" from="59" to="68" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Gaze-contingent depth of field in realistic scenes: The user experience</title>
		<author>
			<persName><forename type="first">M</forename><surname>Vinnikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">S</forename><surname>Allison</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of ETRA</title>
				<meeting>of ETRA</meeting>
		<imprint>
			<date type="published" when="2014">2014</date>
			<biblScope unit="page" from="119" to="126" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Using an eye-tracking system to improve camera motions and depth-of-field blur effects in virtual environments</title>
		<author>
			<persName><forename type="first">S</forename><surname>Hillaire</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Lécuyer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Cozot</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Casiez</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of IEEE VR, IEEE</title>
				<meeting>of IEEE VR, IEEE</meeting>
		<imprint>
			<date type="published" when="2008">2008</date>
			<biblScope unit="page" from="47" to="50" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Depth perception with gazecontingent depth of field</title>
		<author>
			<persName><forename type="first">M</forename><surname>Mauderer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Conte</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">A</forename><surname>Nacenta</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Vishwanath</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of ACM CHI</title>
				<meeting>of ACM CHI</meeting>
		<imprint>
			<date type="published" when="2014">2014</date>
			<biblScope unit="page" from="217" to="226" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<monogr>
		<title level="m" type="main">The spaceship demo project using vfx graph and highdefinition render pipeline</title>
		<author>
			<persName><forename type="first">T</forename><surname>Iche</surname></persName>
		</author>
		<ptr target="https://blog.unity.com/technology/now-available-the-spaceship-demo-project-using-vfx-graph-and-high-definition-render" />
		<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
