<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Approximating eye gaze with head pose in a virtual reality microteaching scenario for pre-service teachers. ⋆</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Ivan</forename><surname>Moser</surname></persName>
							<email>ivan.moser@ffhs.ch</email>
							<affiliation key="aff0">
								<orgName type="institution">Swiss Distance University of Applied Sciences</orgName>
								<address>
									<addrLine>Schinerstrasse 18</addrLine>
									<postCode>3900</postCode>
									<settlement>Brig</settlement>
									<country key="CH">Switzerland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Martin</forename><surname>Hlosta</surname></persName>
							<email>martin.hlosta@ffhs.ch</email>
							<affiliation key="aff0">
								<orgName type="institution">Swiss Distance University of Applied Sciences</orgName>
								<address>
									<addrLine>Schinerstrasse 18</addrLine>
									<postCode>3900</postCode>
									<settlement>Brig</settlement>
									<country key="CH">Switzerland</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Umesh</forename><surname>Ramnarain</surname></persName>
							<email>uramnarain@uj.ac.za</email>
							<affiliation key="aff0">
								<orgName type="institution">Swiss Distance University of Applied Sciences</orgName>
								<address>
									<addrLine>Schinerstrasse 18</addrLine>
									<postCode>3900</postCode>
									<settlement>Brig</settlement>
									<country key="CH">Switzerland</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Johannesburg</orgName>
								<address>
									<addrLine>Auckland Park</addrLine>
									<postCode>2006</postCode>
									<settlement>Johannesburg</settlement>
									<country key="ZA">South Africa</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Christo</forename><surname>Van Der Westhuizen</surname></persName>
							<email>christovdw@uj.ac.za</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Johannesburg</orgName>
								<address>
									<addrLine>Auckland Park</addrLine>
									<postCode>2006</postCode>
									<settlement>Johannesburg</settlement>
									<country key="ZA">South Africa</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mafor</forename><surname>Penn</surname></persName>
							<email>mpenn@uj.ac.za</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Johannesburg</orgName>
								<address>
									<addrLine>Auckland Park</addrLine>
									<postCode>2006</postCode>
									<settlement>Johannesburg</settlement>
									<country key="ZA">South Africa</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Noluthando</forename><surname>Mdlalose</surname></persName>
							<email>nmdlalose@uj.ac.za</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Johannesburg</orgName>
								<address>
									<addrLine>Auckland Park</addrLine>
									<postCode>2006</postCode>
									<settlement>Johannesburg</settlement>
									<country key="ZA">South Africa</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Koketso</forename><surname>Pila</surname></persName>
							<email>kpila@uj.ac.za</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Johannesburg</orgName>
								<address>
									<addrLine>Auckland Park</addrLine>
									<postCode>2006</postCode>
									<settlement>Johannesburg</settlement>
									<country key="ZA">South Africa</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Ayodele</forename><surname>Ogegbo</surname></persName>
							<email>ayodeleo@uj.ac.za</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Johannesburg</orgName>
								<address>
									<addrLine>Auckland Park</addrLine>
									<postCode>2006</postCode>
									<settlement>Johannesburg</settlement>
									<country key="ZA">South Africa</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Approximating eye gaze with head pose in a virtual reality microteaching scenario for pre-service teachers. ⋆</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">80E67ABEC8FC7DD03ECA5450ADE78AA7</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T18:52+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>virtual reality, eye gaze, eye tracking, positional tracking, teacher education, microteaching, multimodal learning analytics, Orcid 0000-0003-2139-2421 (I. Moser)</term>
					<term>0000-0002-7053-7052 (M. Hlosta)</term>
					<term>0000-0002-2551-9058 (P. Bergamin)</term>
					<term>0000-0003-4548-5913 (U. Ramnarain)</term>
					<term>0000-0002-4762-8538 (C. V. d. Westhuizen)</term>
					<term>0000-0001-6217-328X (M. Penn)</term>
					<term>0000-0002-5094-1074 (N. Mdlalose)</term>
					<term>0000-0002-8539-0348 (K. Pila)</term>
					<term>0000-0002-4680-6689 (A. Ogegbo)</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Although immersive virtual reality (IVR) technology is becoming increasingly accessible, head-mounted displays with eye tracking capability are more costly and therefore rarely used in educational settings outside of research. This is unfortunate, since combining IVR with eye tracking can reveal crucial information about the learners' behavior and cognitive processes. To overcome this issue, we investigated whether the positional tracking of learners during a short teaching exercise in IVR (i.e., microteaching) may predict the actual fixation on a given set of classroom objects. We analyzed the positional data of pre-service teachers from 23 microlessons by means of a random forest and compared it to two baseline models. The algorithm was able to predict the correct eye fixation with an F1-score of .8637, an improvement of .5770 over inferring eye fixations based on the forward direction of the IVR headset (head gaze). The head gaze itself was a .1754 improvement compared to predicting the most frequent class (i.e., Floor). Our results indicate that the positional tracking data can successfully approximate eye gaze in an IVR teaching scenario, making it a promising candidate for investigating the pre-service teachers' ability to direct students' and their own attentional focus during a lesson.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Immersive virtual reality (IVR) enables the delivery of educational content in situations where traditional in-person instruction would be dangerous, impossible, counterproductive, or simply too expensive <ref type="bibr" target="#b0">[1]</ref>. Not surprisingly, there has been a steady increase in research interest, investigating the promise and pitfalls of VR in education <ref type="bibr" target="#b1">[2]</ref>.</p><p>Besides the situational benefits, another important strength of IVR is hardware related. Modern consumer IVR headsets are equipped with an array of various built-in sensors. Originally designed to enable and enhance the experience of immersive games, they can also be exploited for the purpose of gathering real-time user data that can be related to the learning process and outcome. For example, positional data can provide insight about learning outcome <ref type="bibr" target="#b2">[3]</ref>, cognitive load <ref type="bibr" target="#b3">[4]</ref>, and social interactions <ref type="bibr" target="#b4">[5]</ref>.</p><p>One particular sensor that has been previously hardly accessible but is finding its way into consumer devices is eye tracking. Put simply, video-based eye trackers emit infrared/nearinfrared light and utilize the resulting corneal reflections and their spatial relation to the center of the pupil to estimate eye gaze vectors <ref type="bibr" target="#b5">[6]</ref>. In combination with IVR, eye tracking offers unprecedented opportunities to study human behavior and cognition <ref type="bibr" target="#b6">[7]</ref>. IVR allows creating highly realistic and controlled environments, and modern game engines make it relatively easy to record gaze directions and areas of interest (AOI) compared to mobile eye tracking systems that track gaze in the real world. 
It has also been demonstrated that eye trackers integrated into IVR headsets achieve sufficient levels to reliably identify the current fixation location, provided that the gaze targets of interest are not in close proximity <ref type="bibr" target="#b7">[8]</ref>.</p><p>Consequently, the value of eye tracking in IVR could be demonstrated across a wide range of tasks. More specifically, eye tracking was shown to enhance user interactions in IVR, for example object selection <ref type="bibr" target="#b8">[9]</ref> or typing on a virtual keyboard <ref type="bibr" target="#b9">[10]</ref>. In the context of education and training, it is important to note that eye tracking can be used to infer cognitive load <ref type="bibr" target="#b10">[11]</ref>, joint attention of learners <ref type="bibr" target="#b11">[12]</ref>, and the distribution of teachers' visual attention in the classroom <ref type="bibr" target="#b12">[13]</ref>. This opens up many possibilities, ranging from personalized IVR learning experiences to enhanced performance feedback for learners and teachers, respectively.</p><p>However, despite the promising research findings, eye tracking is still underrepresented in practical settings outside a scientific context. It is conceivable that the higher cost of IVR headsets with integrated eye-trackers make these devices less accessible for educational use cases. This is even more relevant in the case of collaborative learning, where a classroom would need to be equipped with a higher number of head-mounted displays.</p><p>Therefore, this study set out to investigate whether the position and orientation (i.e. pose) of an IVR headset offers a viable approximation of eye gaze. 
The research question was driven by the idea that, provided head pose (hereafter referred to as head gaze) and eye gaze align sufficiently well, the former could be used to substitute the latter, therefore offering a low-cost alternative to IVR headsets with integrated eye trackers.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Related Work</head><p>Despite the high practical relevance, little research exists to date that studied whether head gaze can sufficiently approximate eye gaze in IVR. However, there is a recent study that argued that head gaze can indeed serve as a proxy for eye gaze in the context of human-robot interaction <ref type="bibr" target="#b13">[14]</ref> when the aim is to teach a (virtual) robot about a person's intent, i.e. what object a person is intending to interact with. Similarly, head gaze has proven useful in a scenario involving the collaboration with a virtual agent <ref type="bibr" target="#b14">[15]</ref>. In this study, the use of bidirectional head gaze between human participants and a virtual character was shown to have a similar positive effect on task performance as bidirectional gaze using eye tracking.</p><p>In the same vein, a few studies from the field of social psychology have utilized head gaze as a proxy for social eye contact. For example, one study investigated how participants interacted with a virtual physician during a simulated clinical visit <ref type="bibr" target="#b15">[16]</ref>. The authors reported that the emotional state of the participants influenced the amount of eye contact they made during the conversation with the physician. Another IVR study tracked nonverbal behavior of participants in a virtual classroom and found different patterns of head movement depending on the level of self-reported social anxiety. Participants with higher level of anxiety exhibited more lateral head movement, indicating increased room scanning behavior compared to participants with low levels of anxiety <ref type="bibr" target="#b16">[17]</ref>.</p><p>Both studies made the implicit assumption that users are mostly looking straight ahead when wearing an IVR headset, thus exhibiting little eye-in-head motion range. 
Although it has been shown to be useful to approximate eye tracking with head tracking <ref type="bibr" target="#b14">[15,</ref><ref type="bibr" target="#b13">14]</ref>, it is noteworthy that users' eye movements in IVR can show quite substantial deviations from the forward direction of the head pose. Sidenmark and Gellersen investigated the coordination of eye, head, and body movements during gaze shifts <ref type="bibr" target="#b17">[18]</ref>. They found that smaller gaze shifts of 25° visual angle or less are predominantly performed with the eyes and without much contribution from the head or torso. However, they also reported large inter-individual differences between users in terms of the eyes' motion range, varying from 20° to 70° visual angle. In line with these findings, another study recently found a high correspondence between eye and head movements in IVR, leading to an accuracy of 75% for AOI with an angular size of 25°, with a substantial drop in accuracy when the AOI were smaller <ref type="bibr" target="#b18">[19]</ref>.</p><p>Taken together, the existing literature shows initial evidence that head gaze can be successfully utilized to approximate eye gaze in IVR, provided that careful attention is directed towards the design of the virtual objects (i.e., AOI). However, we are not aware of studies that investigated the practicability of these findings in applied settings of learning or training. Therefore, the aim of the study was twofold. First, we aimed to evaluate the similarity between eye and head gaze in a dynamic virtual teaching scenario. Based on the previous findings, we hypothesized that we would observe a high correspondence of head pose and eye gaze in a sparsely furnished IVR training environment (i.e., a scene with predominantly large AOI). 
Second, we investigated whether we could use a machine learning algorithm to successfully predict the correct eye gaze targets based on the head gaze plus additional positional tracking data recorded from the IVR headset and corresponding hand controllers.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Methods</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1.">Participants and context</head><p>Forty-five pre-service teachers (PSTs) at a large metropolitan university in South Africa participated in the study. The sample consisted of third-year undergraduates from the Department of Science and Technology education.</p><p>As an integral part of their third-year curriculum, PSTs are practicing their teaching skills by conducting several microlessons throughout their studies. Microlessons are defined as short lesson presentations, typically revolving around a single, tightly defined topic <ref type="bibr" target="#b19">[20]</ref>. The goal of microlessons is to develop the PSTs' pedagogical skills in a safe environment and to teach them how to reflect on their own behavior.</p><p>In the context of this study, PSTs chose one of sixteen topics from the subjects of biology, physics and chemistry. Their task was to prepare the lesson and deliver it using a learner-centered, inquiry-based teaching strategy inside the IVR environment. In an inquiry-based science classroom, the teacher is seen as a facilitator, who provides ample opportunities for learners to actively engage in the learning process <ref type="bibr" target="#b20">[21,</ref><ref type="bibr" target="#b21">22]</ref>. The study was approved by the local ethics committee of the University of Johannesburg.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2.">IVR Learning Environment</head><p>The IVR application was co-designed with five teacher educators to ensure the alignment with inquiry-based teaching. Hence, the IVR classroom was set up in the Unity Game Engine with two types of tables 1) a main table to accommodate students during the introductory and closing phases of the microlessons, and 2) three separate breakout tables, where groups of two to three students could collaborate on a given task. The classroom was also equipped with a whiteboard for slide presentations and drawings, and a flipchart for displaying quiz results. Furthermore, there was a teacher's podium that hosted a control panel to manipulate various classroom functions (e.g., controlling the slides, starting a quiz, etc.). Importantly, it could also be used to select, spawn, and move 3D objects as well as students between tables. For illustration, a screenshot of the IVR environment is depicted in Figure <ref type="figure" target="#fig_0">1</ref>. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3.">Procedure</head><p>Before the participants delivered their microteaching lesson, they received a brief training about the IVR classroom including a short hands-on experience to familiarize them with the available tools and objects of the IVR classroom. Then, the PSTs carried out the teaching exercise while changing roles after each microlesson. For example, in a group of four PSTs A, B, C, and D, PST A would first take on the role of teacher, while the other three PSTs would assume the role of students. After a maximum of 15 minutes allocated for the microlesson, they would change roles and repeat the procedure until each PST had completed their lesson. Later, the PSTs received individual feedback on their teaching behavior from their educator based on a recording of the lesson and a learning analytics dashboard.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.4.">Data collection and preparation</head><p>From a total of 51 microlessons held, we selected 23 based on the following criteria: functional version of the application able to record the eye rotation data (11 excluded sessions), minimum duration of 5 minutes (17 excluded), and 2 lessons were excluded due to the reusage of the same login for a teacher and a student in the same microlesson. This filtering resulted in excluding 6 out of 24 participants as teachers from the dataset. Eye gaze data was only collected for the teacher roles because these participants wore a Meta Quest Pro headset with integrated eye tracker as opposed to the Meta Quest 2 headsets for the student role. PSTs in the teacher role received feedback on their eye gaze behavior via the learning analytics dashboard after the lesson. Eye tracking was not available for the student roles due to the limited availability and higher cost of the eye tracking enabled IVR headsets. Raw data was collected automatically from each device during the run of the microlessons. The collected sensors included pose data (position and rotation) from the IVR headset and both hand controllers, rotation for both eyes and the head and the objects where the user was gazing at. These are summarized in Table <ref type="table" target="#tab_0">1</ref>. These sensory data were collected independently for each user and sensor, with different sampling rate for each sensor -10 Hz for positional data and 20 Hz for the eye tracking data. Hence, the sensory data needed to be synchronized first. This was done by creating 50ms time windows and taking a) the minimum value inside each window time frame for the positional and rotational data and b) the union of all objects present in the eye and head gazing data. Each collected row represents a 50ms long window from a microlesson. 
Concatenating data from all microlessons generated N=439,749 rows.</p><p>We modeled the problem as a multi-class classification. The target variable was the object that was detected as being gazed at by the user's eyes. Before training the model, the dataset had to be cleaned. This included the removal of eye rotation values outside the reported field-of-view of the IVR headset (representing recording failures) and saccades. Several approaches exist to identify fixations and saccades, respectively. We used an Area-of-Interest Identification algorithm, which defines a fixation as a group of consecutive gaze points that fall within the same target area <ref type="bibr" target="#b22">[23]</ref>. Groups that did not span a minimum duration of 100 ms were regarded as saccades and excluded from the analysis. This filtering resulted in a reduced dataset of N=186,864 rows.</p><p>The gazed objects can distinguish between specific users, but since these users were different across sessions, a class User was created to represent all the users. This processing resulted in a dataset with N=338,040 rows with 13 different classes<ref type="foot" target="#foot_0">1</ref> , recorded for 18 users in 23 microlessons, with the average duration 16 minutes (min=5, max=28) and four other students on average being present apart from the teacher (min=1, max=5).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5.">Analysis</head><p>For the modeling, we utilized a Random Forest classifier. This selection was motivated by Random Forest being often one of the best classifiers in Learning Analytics data <ref type="bibr" target="#b23">[24]</ref>, but also in a recent study on VR data collected from an educational application <ref type="bibr" target="#b24">[25]</ref>. We used the implementation from the R ranger package <ref type="bibr" target="#b25">[26]</ref>. Due to the large dataset, we did not perform hyperparameter tuning and for the same reason, we used the version of training without replacement and training on a .632 fraction of the data, setting the seed=123, and leaving all other parameters default.</p><p>We used 5-fold cross validation (i.e. always 80% of the dataset with 20% left for testing) with the split stratified by the class distribution. The random forest model (further referred to as "CLASSIFIER") was compared to two baseline models. 1) Model "FLOOR" represents a naive classifier that classifies all the instances to the majority class. 2) Model "HEAD GAZE" represents a model that is using the gazed object as derived from the head gaze. All the metrics are reported as mean and standard deviation across the 5 testing folds.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Results</head><p>For both the machine learning model and the baselines, we report usual metrics for a multiclass classification, i.e. precision, recall and F1-score, which were averaged across all the classes using a) the weighted and b) macro average. We focus more on the weighted average because we think it is important to consider the distribution of the classes.</p><p>The results of both average types are depicted in Figure <ref type="figure" target="#fig_1">2</ref>. We see that the performance using only the baseline model "FLOOR" is very poor, both for the weighted and macro average. The only value above .20 is a weighted-average recall, due to classifying the largest class affecting the weighted average more than the macro. The "HEAD GAZE" baseline performs better than "FLOOR" on all the metrics, showing promising direction. However, all the values are below .35, which is still very poor performance.</p><p>On the other hand, both averages reveal a steep increase in the performance for the "CLASSIFIER" over both of the baselines, with the F1-score for the macro-average .7568 and the weighted average .8637. The higher values of the weighted average are caused by a better performance on the larger classes. This is expected, as some of the minor classes might not have a sufficient representation in the dataset to produce good results.</p><p>A similar picture about the improvement of the machine learning model compared to the baseline appears from Figure <ref type="figure" target="#fig_2">3</ref>, depicting the heatmaps for the "HEAD GAZE" and "CLASSIFIER" predictors. While the baseline model matrix is quite scattered, and full of misclassifications for almost every class, the "CLASSIFIER" model reveals a more pronounced diagonal line indicating higher precision on all classes. For example, it is apparent that the "HEAD GAZE" misclassified many objects as "User". 
This would indicate that a teacher is indeed paying closer attention to students, a laudable feature of student-centered teaching. These false-positives for a "User" are significantly reduced for the "CLASSIFIER" model. Still, the classifier is far from perfect, especially because of the many misclassifications for the two largest classes "Room Wall" and "Room Floor". </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Discussion</head><p>Investigating the use of positional tracking in a microteaching scenario, we found a low correspondence of gaze targets inferred from eye tracking and the forward orientation of an IVR headset, that is, comparing eye gaze and head gaze. This result suggests that head gaze alone does not sufficiently approximate eye gaze, which is in contrast to previous reports claiming that the former can be used as proxy for the latter <ref type="bibr" target="#b14">[15,</ref><ref type="bibr" target="#b13">14]</ref>. However, our finding is consistent with the notion that people exhibit substantial eye gaze deviations from the head forward direction with large inter-individual differences regarding the eye-in-head motion range <ref type="bibr" target="#b17">[18]</ref>. Moreover, it is noteworthy that contrary to controlled, experimental studies, we investigated positional tracking in an applied training scenario. Teaching a microlesson in the IVR environment entailed dynamic motion in terms of changing between different locations ("teleporting") and the handling of various interactive tools and objects.</p><p>Nevertheless, we could demonstrate the usefulness of positional tracking data in IVR. Although head gaze matched the eye gaze only poorly, submitting the positional data of the IVR headset and hand controllers to a Random Forest classifier, the model was able to predict the fixations of the eye tracking with high precision and recall. More specifically, the results indicated a .8637 ± .0020 F1-score for weighted-average of the random forest. Compared to the F1-score of .2867 ± .0017 for the baseline model with head gaze, this represents an improvement of .5770.</p><p>Despite the promising results regarding the usefulness of positional tracking data to predict actual eye gaze during teaching in IVR, it is important to discuss potential limitations of our approach to the data analysis. 
Although we trained and evaluated our classifier on different data samples, both datasets contained data from the same individuals. It is therefore conceivable that the resulting predictive performance is inflated, i.e., higher than if the model had been evaluated on new participants. This holds particularly true as people show significant inter-individual differences in eye movement behavior <ref type="bibr" target="#b17">[18]</ref>, which would make the prediction of new participants' behavior challenging. However, it is also important to emphasize that the PSTs rotate roles in the teaching exercise. Therefore, it can be considered adequate to train an algorithm on the PSTs' data in a session when they are wearing an eye tracking enabled headset, and use that model to make inferences about their gaze in the other sessions.</p><p>Another potential limitation to note is that classical random forest classifiers do not generate high-quality models on correlated data <ref type="bibr" target="#b26">[27]</ref>. This stems from the violated assumption of independent and identically distributed data when dealing with longitudinal data. Therefore, a future direction of our research is to use a more computationally intensive model (e.g. a long short-term memory (LSTM) recurrent neural network) designed to handle time-series data.</p><p>Finally, we would like to point out that, to our knowledge, no established, independent estimates of the Meta Quest Pro's eye tracking performance exist to date. A preliminary study found an accuracy of 1.652° and a precision of 0.699° standard deviation, which is comparable to other IVR devices with integrated eye tracking <ref type="bibr" target="#b27">[28]</ref>. However, the authors of the study also point out to be careful when interpreting fixation results. Their word of caution is related to the findings that the validity and reliability of eye tracking in IVR is influenced by many interacting factors, e.g. 
the placement of visual targets close or far from the periphery or vision correction <ref type="bibr" target="#b7">[8]</ref>. Generally speaking, there is always a certain uncertainty involved in eye tracking research in the absence of an external reference measurement. This is not a specific limitation of this study but rather a general problem of eye tracking research.</p><p>For future direction, we are planning to corroborate and validate our findings by a) employing a more adequate machine learning model (see above) and b) investigating how well our results transfer from the teacher to the student role. For this purpose, we are planning to equip students with eye tracking enabled devices too. This would allow us to train and test an ML algorithm on different sessions of the same PST in different roles. Showing that the good predictive performance of the positional data generalizes across different sessions could have far-reaching practical implications for teacher education. It would equip PSTs and their educators with sophisticated, non-obtrusive ways to measure the attentional focus of teacher and students during a teaching exercise. For example, it could be used to make inferences about the PSTs' ability to distribute their attention to all students equally, and to make them aware of how their behavior compares to that of experienced teachers <ref type="bibr" target="#b12">[13]</ref>. Generally, visualizing the attentional focus can greatly contribute to augmenting the feedback the PSTs receive from their peers and educators, therefore improving this central component of teacher training <ref type="bibr" target="#b19">[20]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.">Conclusion</head><p>In this study, we investigated to what extent we can approximate the eye gazed objects by a) head gaze and b) a random forest classifier trained using the combination of position and rotation data. This is in the context of an IVR classroom of PSTs practicing their lesson. We found an added benefit of the machine learning model, which showed a good performance, opposed to using only the rather poor results of the pure head gaze approximation. These results are promising as they suggest that in some contexts, using cheaper devices might be sufficient to estimate the eye gaze of IVR users, and enable analytics possible currently only on expensive devices with eye tracking.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Screenshot from the VR Microlesson application with 2 students in the lesson with the human heart 3D model.</figDesc><graphic coords="4,89.29,84.19,416.69,209.83" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Precision, recall and F1-score for all three models using weighted average (left), macro average (center) and a table with the results for both average types (right).</figDesc><graphic coords="7,89.29,84.19,416.70,169.40" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: Two heatmaps depicting the correspondence of gaze targets as determined by the eye tracking ("Gaze Target Eye") with a) detected objects from the IVR headset's forward orientation ("Gaze Target Head") and b) the predicted gaze targets of the random forest. Darker shades on the diagonal represent higher classification performance.</figDesc><graphic coords="8,89.29,84.19,416.72,217.49" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>Table 1</head><label>1</label><figDesc>Features extracted from the microlessons</figDesc><table><row><cell>name</cell><cell>description</cell></row></table></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">3D Object, User, Object other, Flipchart, Room LessonState, Whiteboard, Podium, Room Chair, Room Table, Room Ceiling, Room Floor, Room Wall</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>We would like to thank Lucas Martinic and Ferhan Özkan from XR Bootcamp GmbH for programming the VR application and Deian Popic for creating the 3D assets used in the study. The study was partially funded by a Higher Ed XR Innovation grant from the Tides Foundation.</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<monogr>
		<author>
			<persName><forename type="first">J</forename><surname>Bailenson</surname></persName>
		</author>
		<title level="m">Experience on demand: What virtual reality is, how it works, and what it can do</title>
				<meeting><address><addrLine>New York, NY, US</addrLine></address></meeting>
		<imprint>
			<publisher>W. W. Norton &amp; Company</publisher>
			<date type="published" when="2018">2018</date>
			<biblScope unit="page">290</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">The Promise and Pitfalls of Learning in Immersive Virtual Reality</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">E</forename><surname>Mayer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Makransky</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Parong</surname></persName>
		</author>
		<idno type="DOI">10.1080/10447318.2022.2108563</idno>
	</analytic>
	<monogr>
		<title level="j">Int. Journal of Human-Computer Interaction</title>
		<imprint>
			<biblScope unit="page" from="1" to="10" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Extracting Velocity-Based User-Tracking Features to Predict Learning Gains in a Virtual Reality Training Application</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">G</forename><surname>Moore</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">P</forename><surname>Mcmahan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Dong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Ruozzi</surname></persName>
		</author>
		<idno type="DOI">10.1109/ISMAR50242.2020.00099</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE Int. Symposium on Mixed and Augmented Reality (ISMAR), IEEE</title>
				<meeting><address><addrLine>Porto de Galinhas, Brazil</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2020">2020. 2020</date>
			<biblScope unit="page" from="694" to="703" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Work-in-Progress-Motion Tracking Data as a Proxy for Cognitive Load in Immersive Learning</title>
		<author>
			<persName><forename type="first">I</forename><surname>Moser</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I.-S</forename><surname>Comsa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Parsaeifard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Bergamin</surname></persName>
		</author>
		<idno type="DOI">10.23919/iLRN55037.2022.9815894</idno>
	</analytic>
	<monogr>
		<title level="m">2022 8th International Conference of the Immersive Learning Research Network (iLRN)</title>
				<meeting><address><addrLine>Vienna, Austria</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE</publisher>
			<date type="published" when="2022">2022</date>
			<biblScope unit="page" from="1" to="3" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Tracing physical behavior in virtual reality: A narrative review of applications to social psychology</title>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">E</forename><surname>Yaremych</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Persky</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.jesp.2019.103845</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Experimental Social Psychology</title>
		<imprint>
			<biblScope unit="volume">85</biblScope>
			<biblScope unit="page">103845</biblScope>
			<date type="published" when="2019">2019</date>
			<publisher>Elsevier</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Review of Eye Tracking Metrics Involved in Emotional and Cognitive Processes</title>
		<author>
			<persName><forename type="first">V</forename><surname>Skaramagkas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Giannakakis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Ktistakis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Manousos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Karatzanis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Tachos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Tripoliti</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Marias</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">I</forename><surname>Fotiadis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Tsiknakis</surname></persName>
		</author>
		<idno type="DOI">10.1109/RBME.2021.3066072</idno>
	</analytic>
	<monogr>
		<title level="j">IEEE Reviews in Biomedical Engineering</title>
		<imprint>
			<biblScope unit="volume">16</biblScope>
			<biblScope unit="page" from="260" to="277" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Eye tracking in virtual reality</title>
		<author>
			<persName><forename type="first">V</forename><surname>Clay</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>König</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">U</forename><surname>König</surname></persName>
		</author>
		<idno type="DOI">10.16910/jemr.12.1.3</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Eye Movement Research</title>
		<imprint>
			<biblScope unit="volume">12</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Eye tracking in virtual reality: Vive pro eye spatial accuracy, precision, and calibration reliability</title>
		<author>
			<persName><forename type="first">I</forename><surname>Schuetz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Fiehler</surname></persName>
		</author>
		<idno type="DOI">10.16910/jemr.15.3.3</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Eye Movement Research</title>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Efficient and Accurate Object 3D Selection With Eye Tracking-Based Progressive Refinement</title>
		<author>
			<persName><forename type="first">Y</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Kopper</surname></persName>
		</author>
		<idno type="DOI">10.3389/frvir.2021.607165</idno>
	</analytic>
	<monogr>
		<title level="j">Frontiers in Virtual Reality</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="page">607165</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Gaze Speedup: Eye Gaze Assisted Gesture Typing in Virtual Reality</title>
		<author>
			<persName><forename type="first">M</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">M</forename><surname>Pierce</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Tan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">R</forename><surname>Jonker</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Benko</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Gupta</surname></persName>
		</author>
		<idno type="DOI">10.1145/3581641.3584072</idno>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 28th Int. Conf. on Intelligent User Interfaces</title>
				<meeting>of the 28th Int. Conf. on Intelligent User Interfaces<address><addrLine>Sydney NSW Australia</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="595" to="606" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Person Independent, Privacy Preserving, and Real Time Assessment of Cognitive Load using Eye Tracking in a Virtual Reality Setup</title>
		<author>
			<persName><forename type="first">E</forename><surname>Bozkir</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Geisler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Kasneci</surname></persName>
		</author>
		<idno type="DOI">10.1109/VR.2019.8797758</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE Conf. on Virtual Reality and 3D User Interfaces (VR)</title>
				<meeting><address><addrLine>Osaka, Japan</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE</publisher>
			<date type="published" when="2019">2019. 2019</date>
			<biblScope unit="page" from="1834" to="1837" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">The Impact of Sharing Gaze Behaviours in Collaborative Mixed Reality</title>
		<author>
			<persName><forename type="first">A</forename><surname>Jing</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>May</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Matthews</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Billinghurst</surname></persName>
		</author>
		<idno type="DOI">10.1145/3555564</idno>
	</analytic>
	<monogr>
		<title level="j">Proceedings of the ACM on Human-Computer Interaction</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page" from="1" to="27" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Eye-tracking research on teacher professional vision: A meta-analytic review</title>
		<author>
			<persName><forename type="first">O</forename><surname>Keskin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Seidel</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Stürmer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Gegenfurtner</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.edurev.2023.100586</idno>
	</analytic>
	<monogr>
		<title level="j">Educational Research Review</title>
		<imprint>
			<biblScope unit="volume">42</biblScope>
			<biblScope unit="page">100586</biblScope>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Head pose as a proxy for gaze in virtual reality</title>
		<author>
			<persName><forename type="first">P</forename><surname>Higgins</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Barron</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Matuszek</surname></persName>
		</author>
		<ptr target="https://openreview.net/forum?id=ShGeRZBcp19" />
	</analytic>
	<monogr>
		<title level="m">5th international workshop on virtual, augmented, and mixed reality for HRI</title>
				<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Looking Coordinated: Bidirectional Gaze Mechanisms for Collaborative Interaction with Virtual Characters</title>
		<author>
			<persName><forename type="first">S</forename><surname>Andrist</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Gleicher</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Mutlu</surname></persName>
		</author>
		<idno type="DOI">10.1145/3025453.3026033</idno>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 2017 CHI Conf. on Human Factors in Computing Systems</title>
				<meeting>of the 2017 CHI Conf. on Human Factors in Computing Systems<address><addrLine>Denver Colorado USA</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="2571" to="2582" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Nonverbal and paraverbal behavior in (simulated) medical visits related to genomics and weight: a role for emotion and race</title>
		<author>
			<persName><forename type="first">S</forename><surname>Persky</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">A</forename><surname>Ferrer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">M P</forename><surname>Klein</surname></persName>
		</author>
		<idno type="DOI">10.1007/s10865-016-9747-5</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Behavioral Medicine</title>
		<imprint>
			<biblScope unit="volume">39</biblScope>
			<biblScope unit="page" from="804" to="814" />
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Identifying Anxiety Through Tracked Head Movements in a Virtual Classroom</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">S</forename><surname>Won</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Perone</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Friend</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">N</forename><surname>Bailenson</surname></persName>
		</author>
		<idno type="DOI">10.1089/cyber.2015.0326</idno>
	</analytic>
	<monogr>
		<title level="j">Cyberpsychology, Behavior, and Social Networking</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<biblScope unit="page" from="380" to="387" />
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Eye, Head and Torso Coordination During Gaze Shifts in Virtual Reality</title>
		<author>
			<persName><forename type="first">L</forename><surname>Sidenmark</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Gellersen</surname></persName>
		</author>
		<idno type="DOI">10.1145/3361218</idno>
	</analytic>
	<monogr>
		<title level="j">ACM Trans. on Computer-Human Interaction</title>
		<imprint>
			<biblScope unit="volume">27</biblScope>
			<biblScope unit="page" from="1" to="40" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Comparing Eye Tracking and Head Tracking During a Visual Attention Task in Immersive Virtual Reality</title>
		<author>
			<persName><forename type="first">J</forename><surname>Llanes-Jurado</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Marín-Morales</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Moghaddasi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Khatri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Guixeres</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Alcañiz</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-030-78465-2_3</idno>
	</analytic>
	<monogr>
		<title level="m">Human-Computer Interaction. Interaction Techniques and Novel Applications</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Kurosu</surname></persName>
		</editor>
		<meeting><address><addrLine>Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer International Publishing</publisher>
			<date type="published" when="2021">2021</date>
			<biblScope unit="volume">12763</biblScope>
			<biblScope unit="page" from="32" to="43" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Microteaching, an efficient technique for learning effective teaching</title>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">L</forename><surname>Banga</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Scholarly research journal for interdisciplinary studies</title>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<biblScope unit="page" from="2206" to="2211" />
			<date type="published" when="2014">2014</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">The 5E instructional model: A learning cycle approach for inquirybased science teaching</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">B</forename><surname>Duran</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Duran</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Science Education Review</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="page" from="49" to="58" />
			<date type="published" when="2004">2004</date>
			<publisher>ERIC</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Pre-Service Teacher Experiences of the 5E Instructional Model: A Systematic Review of Qualitative Studies</title>
		<author>
			<persName><forename type="first">S</forename><surname>Turan</surname></persName>
		</author>
		<idno type="DOI">10.29333/ejmste/11102</idno>
	</analytic>
	<monogr>
		<title level="j">Eurasia Journal of Mathematics, Science and Technology Education</title>
		<imprint>
			<biblScope unit="volume">17</biblScope>
			<biblScope unit="page">1994</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Identifying fixations and saccades in eye-tracking protocols</title>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">D</forename><surname>Salvucci</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">H</forename><surname>Goldberg</surname></persName>
		</author>
		<idno type="DOI">10.1145/355017.355028</idno>
	</analytic>
	<monogr>
		<title level="m">Proc. of the symposium on Eye tracking research &amp; applications -ETRA &apos;00</title>
				<meeting>of the symposium on Eye tracking research &amp; applications -ETRA &apos;00<address><addrLine>Palm Beach Gardens, Florida, United States</addrLine></address></meeting>
		<imprint>
			<publisher>ACM Press</publisher>
			<date type="published" when="2000">2000</date>
			<biblScope unit="page" from="71" to="78" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">Prediction of dilatory behavior in elearning: A comparison of multiple machine learning models</title>
		<author>
			<persName><forename type="first">C</forename><surname>Imhof</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I.-S</forename><surname>Comsa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Hlosta</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Parsaeifard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Moser</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Bergamin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Learning Technologies</title>
		<imprint>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Learning analytics for student modeling in virtual reality training systems: Lineworkers case</title>
		<author>
			<persName><forename type="first">G</forename><surname>Santamaría-Bonfil</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">B</forename><surname>Ibáñez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pérez-Ramírez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Arroyo-Figueroa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Martínez-Álvarez</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Computers &amp; Education</title>
		<imprint>
			<biblScope unit="volume">151</biblScope>
			<biblScope unit="page">103871</biblScope>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<monogr>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">N</forename><surname>Wright</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Ziegler</surname></persName>
		</author>
		<idno type="arXiv">arXiv:1508.04409</idno>
		<title level="m">ranger: A fast implementation of random forests for high dimensional data in c++ and r</title>
				<imprint>
			<date type="published" when="2015">2015</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">Mixed effect machine learning: A framework for predicting longitudinal change in hemoglobin a1c</title>
		<author>
			<persName><forename type="first">C</forename><surname>Ngufor</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Van Houten</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">S</forename><surname>Caffo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">D</forename><surname>Shah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">G</forename><surname>Mccoy</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.jbi.2018.09.001</idno>
	</analytic>
	<monogr>
		<title level="j">Journal of Biomedical Informatics</title>
		<imprint>
			<biblScope unit="volume">89</biblScope>
			<biblScope unit="page" from="56" to="67" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">A Preliminary Study of the Eye Tracker in the Meta Quest Pro</title>
		<author>
			<persName><forename type="first">S</forename><surname>Wei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Bloemers</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rovira</surname></persName>
		</author>
		<idno type="DOI">10.1145/3573381.3596467</idno>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2023 ACM International Conference on Interactive Media Experiences</title>
				<meeting>the 2023 ACM International Conference on Interactive Media Experiences<address><addrLine>Nantes France</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="216" to="221" />
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
