<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">A novel neuro-fuzzy approach for evaluating educational programme quality and institutional performance in higher education</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Andriy</forename><forename type="middle">V</forename><surname>Ryabko</surname></persName>
							<email>ryabko@meta.ua</email>
							<affiliation key="aff0">
								<orgName type="institution">Oleksandr Dovzhenko Hlukhiv National Pedagogical University</orgName>
								<address>
									<addrLine>24 Kyivska Str</addrLine>
									<postCode>41400</postCode>
									<settlement>Glukhiv</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Tetiana</forename><forename type="middle">A</forename><surname>Vakaliuk</surname></persName>
							<email>tetianavakaliuk@gmail.com</email>
							<affiliation key="aff1">
								<orgName type="institution">Zhytomyr Polytechnic State University</orgName>
								<address>
									<addrLine>103 Chudnivsyka Str</addrLine>
									<postCode>10005</postCode>
									<settlement>Zhytomyr</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
							<affiliation key="aff2">
								<orgName type="department">Institute for Digitalisation of Education</orgName>
								<orgName type="institution">NAES of Ukraine</orgName>
								<address>
									<addrLine>9 M. Berlynskoho Str</addrLine>
									<postCode>04060</postCode>
									<settlement>Kyiv</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
							<affiliation key="aff3">
								<orgName type="institution">Kryvyi Rih State Pedagogical University</orgName>
								<address>
									<addrLine>54 Universytetskyi Ave., Kryvyi Rih</addrLine>
									<postCode>50086</postCode>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
							<affiliation key="aff4">
								<orgName type="department">Academy of Cognitive and Natural Sciences</orgName>
								<address>
									<addrLine>54 Universytetskyi Ave., Kryvyi Rih</addrLine>
									<postCode>50086</postCode>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Oksana</forename><forename type="middle">V</forename><surname>Zaika</surname></persName>
							<email>ksuwazaika@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">Oleksandr Dovzhenko Hlukhiv National Pedagogical University</orgName>
								<address>
									<addrLine>24 Kyivska Str</addrLine>
									<postCode>41400</postCode>
									<settlement>Glukhiv</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Roman</forename><forename type="middle">P</forename><surname>Kukharchuk</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Oleksandr Dovzhenko Hlukhiv National Pedagogical University</orgName>
								<address>
									<addrLine>24 Kyivska Str</addrLine>
									<postCode>41400</postCode>
									<settlement>Glukhiv</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Iryna</forename><forename type="middle">O</forename><surname>Kukharchuk</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Inesa</forename><forename type="middle">V</forename><surname>Novitska</surname></persName>
							<email>inesanovicka@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">Oleksandr Dovzhenko Hlukhiv National Pedagogical University</orgName>
								<address>
									<addrLine>24 Kyivska Str</addrLine>
									<postCode>41400</postCode>
									<settlement>Glukhiv</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
							<affiliation key="aff5">
								<orgName type="institution">Zhytomyr Ivan Franko State University</orgName>
								<address>
									<addrLine>30 Velyka Berdychivska Str</addrLine>
									<postCode>10002</postCode>
									<settlement>Zhytomyr</settlement>
									<country key="UA">Ukraine</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">A novel neuro-fuzzy approach for evaluating educational programme quality and institutional performance in higher education</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">4013D22AC1F1A40197B2DE003681559E</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T16:23+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>educational programme evaluation, institutional performance assessment, neuro-fuzzy inference, ANFIS, artificial neural networks, higher education quality</term>
					<term>(A. V. Ryabko)</term>
					<term>0000-0001-6825-4697 (T. A. Vakaliuk)</term>
					<term>0000-0002-8479-9408 (O. V. Zaika)</term>
					<term>0000-0002-7588-7406 (R. P. Kukharchuk)</term>
					<term>0000-0002-2854-1429 (I. O. Kukharchuk)</term>
					<term>0000-0003-0780-0580 (I. V. Novitska)</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>This paper presents a novel methodology for evaluating the quality of educational programmes and institutional performance in higher education institutions using advanced artificial intelligence techniques, specifically the Adaptive Neuro-Fuzzy Inference System (ANFIS) and multi-layer neural networks. The primary objectives of the study were to address the challenges of subjectivity in self-assessment processes and proactively identify potential issues and deficiencies in educational activities prior to accreditation reviews. The proposed approach utilised student ratings on a four-level assessment scale as input data for the multi-layer neural network, while the criteria for assessing educational programme quality served as input variables for the ANFIS model. The underlying hypothesis was that students with higher academic performance would provide more objective assessments of the quality criteria. The results demonstrated that the multi-layer neural network exhibited superior predictive accuracy compared to the ANFIS model. This paper suggests that the proposed methodology can equip higher education leaders with high-quality forecasts to ascertain the calibre of educational services and pinpoint potential problems in advance of accreditation examinations. However, the authors acknowledge the necessity for further research with larger datasets to enhance the predictive capabilities of the models.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>The evaluation of educational quality is a complex and multifaceted endeavour, often involving the assessment of non-numerical characteristics that are challenging to formalise. While certain aspects of higher education institutions, such as the number of computers, students, or the area of educational premises, are quantifiable, the evaluation of educational programmes and institutional performance is typically conducted using qualitative criteria. In the context of self-assessment and accreditation processes, institutions and expert reviewers are required to assess compliance with established criteria using a four-level scale: A, B, E, and F.</p><p>Consequently, there is a pressing need for the development of methods that enable the quantitative description of processes and subjects related to the assessment of educational programme quality and institutional performance. The concept of educational quality is of particular significance, as it represents a comprehensive indicator that reflects both the outcomes of an educational institution and its alignment with societal needs and expectations in terms of individual competency development. The application of quantitative evaluation methods for educational programmes and institutional activities can empower higher education institutions to identify existing deficiencies and potential issues, providing an opportunity to address them proactively before accreditation examinations.</p><p>However, assessing the quality of educational programmes and institutional performance is complicated by the fact that the value of this indicator is influenced by numerous factors, potentially with an unknown nature of influence. Moreover, the "product" of education -a graduate of an educational institution -should be considered as a complex system. 
While various methods and algorithms exist for assessing the quality of educational activities, this study proposes a novel approach based on the neuro-fuzzy paradigm, leveraging the rapid advancements in artificial intelligence-based analytical systems. Among the most well-established and effective AI technologies are neural networks, which have demonstrated success in addressing a wide range of "fuzzy" tasks, such as prediction, classification, handwritten text recognition, language processing, image analysis <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b1">2,</ref><ref type="bibr" target="#b2">3]</ref>, and often serve as the sole effective solution in scenarios where traditional technologies are inadequate. In this work, artificial neural networks are employed to tackle the challenge of evaluating the quality of educational programmes and institutional performance.</p><p>A prerequisite for accreditation is the compliance of educational programmes and institutional activities with legally established criteria. Specifically, the forms and methods of teaching should effectively contribute to the attainment of the stated objectives of the educational programme and the intended learning outcomes.</p><p>Given that educational programmes and institutional activities must adhere to the principles of student-centredness and academic freedom, the hypothesis of this study posits that a sample of current students and recent graduates can provide an adequate comprehensive assessment of the quality of educational programmes and institutional performance.</p><p>The intelligent processing of data using neural networks enables the generation of probabilistic forecasts of future accreditation examination results in higher education institutions, which can facilitate the improvement of measures aimed at enhancing educational programmes. These predictive insights can serve as informative and advisory resources for faculty and department leaders. 
Furthermore, educational programme coordinators can leverage these forecasts to plan activities and individualised work with educators to positively influence the predicted outcomes. The analysis of the obtained data can also reveal weaknesses in the educational process, providing opportunities for modernisation.</p><p>In light of these considerations, this article aims to substantiate, develop, and implement a mathematical model for the comprehensive assessment of educational programme quality and institutional performance based on neuro-fuzzy approaches.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.1.">Theoretical background</head><p>The assessment of educational activity quality based on well-defined criteria and methodologies is a crucial aspect of the accreditation process for educational programmes in Ukraine. During the preparation for accreditation and the compilation of materials for self-assessment, institutions often encounter challenges in determining the objectivity of their self-evaluation and identifying potential problems and shortcomings in their educational activities. To address this issue, there is an urgent need for mathematical tools that can assist higher education managers in evaluating the quality of the educational services they provide.</p><p>The shift in educational philosophy and practice has led to a heightened focus on student learning outcomes. The educational process should be results-oriented, emphasising what students actually know and can do. Consequently, student-centred learning has emerged as an approach in which students influence the content, activities, materials, and pace of their learning, placing them at the heart of the learning process <ref type="bibr" target="#b3">[4]</ref>.</p><p>European Union initiatives emphasise the importance of increasing the efficiency, international attractiveness, and competitiveness of higher education institutions. Wächter et al. <ref type="bibr" target="#b4">[5]</ref> examines various approaches to quality, quality assurance, and ratings, analysing recent research and providing recommendations and policy options for parliament from a comparative perspective.</p><p>The challenge of identifying a set of effective indicators that are easily measurable and applicable to diverse institutions, from large public universities to small regional private colleges, and from university programmes to alternative programmes, is also relevant in the United States <ref type="bibr" target="#b5">[6]</ref>.</p><p>Cherniak et al. 
<ref type="bibr" target="#b6">[7]</ref> explored the possibility of assessing the quality of qualimetry objects using a graphanalytical method, applying the principle of determining the area and volume under curved surfaces, both in the plane and in space, created by combining estimates of individual quality indicators on a dimensionless scale. The research demonstrates that mathematical dependencies are typically nonlinear, and their investigation involves the development of universal methods applicable to qualimetry objects, regardless of their nature, complexity, or importance. By representing unit quality indicators on a single (dimensionless) rating scale, the authors propose determining a single comprehensive quality indicator for a qualimetry object using the integration method, which takes into account the evaluation of unit quality indicators.</p><p>Parvu and Ipate <ref type="bibr" target="#b7">[8]</ref> propose a mathematical model based on a set of indicators adapted to the globally recognised classification structure of intellectual capital, namely the external structure, internal structure, and employee competence. The Rompedet method, an original product of the Romanian school of management <ref type="bibr" target="#b8">[9]</ref>, is employed as a mathematical calculation tool.</p><p>When assessing the quality of education, we encounter a vast array of criteria, each potentially consisting of numerous sub-criteria. Consequently, the task of evaluating educational quality in its mathematical formulation is inherently multi-criteria. 
Problem situations modelled and described by linear models that depend on multiple factors play a significant role, and solving multi-criteria decision-making problems often involves solving multi-criteria linear programming problems, also known as vector optimisation problems.</p><p>Considering these challenges, mathematical models of integrated quality assessment using methods based on the convolution of criteria were also of interest for this study. Models and methods of multicriteria optimisation are discussed in <ref type="bibr" target="#b9">[10]</ref>, particularly the method of additive convolution of criteria and the method of multiplicative and minimax convolution of criteria. The method of multiplicative convolution of partial criteria to a single generalised indicator, which utilises the maximum (minimum) values of partial criteria as a normalised divisor, is considered in <ref type="bibr" target="#b10">[11]</ref>. Chervak <ref type="bibr" target="#b11">[12]</ref> employs one of the methods for solving the Paretian multi-criteria optimisation problem as a mathematical tool in the decision-making process. To organise selection problems on the same admissible set of alternatives, the concept of a super criterion for any criterion is introduced; if one criterion is a super criterion of another on a given set, the latter is a sub-criterion of the former. 
The solution of the multi-criteria selection problem by Paretian convolution is shown to be reducible to the solution of scalar or lexicographic optimisation problems.</p><p>The theory of artificial neural networks and deep learning models is explored in fundamental works <ref type="bibr" target="#b12">[13,</ref><ref type="bibr" target="#b13">14,</ref><ref type="bibr" target="#b14">15]</ref>, while system design based on the neuro-fuzzy approach is discussed in <ref type="bibr" target="#b15">[16,</ref><ref type="bibr" target="#b16">17,</ref><ref type="bibr" target="#b17">18,</ref><ref type="bibr" target="#b18">19,</ref><ref type="bibr" target="#b19">20]</ref>.</p><p>Lesinski et al. <ref type="bibr" target="#b20">[21]</ref> consider the use of neural networks to classify the status of higher education graduates based on selected academic, demographic, and other indicators. A multi-layer neural network with feedback is employed as a model, trained on over 5,000 records from entrance exams and university databases. The nine input variables consisted of categorical and numerical data containing information about high school education, test results, high school teacher assessments, parental assessments, and more. Based on these inputs, the multi-layer neural network predicted the success of university entrants, achieving a classification accuracy exceeding 95%. Black et al. <ref type="bibr" target="#b3">[4]</ref>, examining the relationship between quality and high school student success in college, found no convincing evidence that exposure characteristics of high school diminish over time in teaching students.</p><p>To address the issue of determining the quality of educational training, Mahapatra and Khan <ref type="bibr" target="#b21">[22]</ref> developed the EduQUAL methodology and proposed an integrative approach using neural networks to assess educational quality. 
Four neural network models based on a feedback algorithm are used to predict educational quality for different stakeholders, with the P-E Gap model identified as the best model for all stakeholders.</p><p>The need to introduce neural network technology in educational courses is highlighted by Semerikov et al. <ref type="bibr" target="#b22">[23]</ref>. Educational neural networks are often used for forecasting purposes. For example, students must choose courses of interest for the upcoming semester, but due to limitations such as insufficient resources and the overhead of offering multiple courses, some universities may not be able to teach all courses selected by students. Universities need to know each student's course requirements for optimal course planning each semester. Kardan et al. <ref type="bibr" target="#b23">[24]</ref> used a neural network to model student choice behaviour and apply the resulting function to predict the final enrolment of students for each course, demonstrating high prediction accuracy based on experimental data. Arsad et al. <ref type="bibr" target="#b24">[25]</ref>, Osadchyi et al. <ref type="bibr" target="#b25">[26]</ref>, and Okubo et al. <ref type="bibr" target="#b26">[27]</ref> prove that the use of neural networks in predicting educational processes allows obtaining results with significantly higher accuracy and in less time. According to Abu Naser et al. <ref type="bibr" target="#b27">[28]</ref>, an artificial neural network can correctly predict the success of more than 80% of future students. <ref type="bibr">Chaban and Kukhtiak [29]</ref> analyse the problem of the social system consisting of many higher education students and teachers to create effective "teacher-student" learning pairs, using elements of artificial intelligence theory based on artificial neural networks to form these learning pairs. Okubo et al. 
<ref type="bibr" target="#b29">[30]</ref> propose the use of a recurrent neural network (RNN) to predict students' final grades using journal data stored in educational systems.</p><p>Liu et al. <ref type="bibr" target="#b30">[31]</ref> propose a method for assessing the quality of postgraduate education preparation based on the neural network backpropagation algorithm and stress testing. This method creates a publicly available list of indicators consisting of 19 criteria in 4 groups: attitudes towards teaching, teaching content, teaching approach, and key teacher characteristics. After the neural network algorithm determines the optimal parameters of the evaluation model, a sensitivity test identifies indicators that significantly impact educational quality. Additionally, scenario analysis is used to study the impact of educational quality in predefined situations, providing theoretical and empirical support for assessing the quality of postgraduate teaching, improving educational quality, and fostering teachers' professional growth.</p><p>Educational institutions continuously strive to improve their services, aiming to have the best teaching staff, enhance teaching quality, and boost students' academic success. Understanding the factors influencing student learning can help universities and learning centres adapt their curricula and teaching methods to meet students' needs. One of the first measures taken by educational institutions in response to the COVID-19 pandemic was the creation of virtual learning environments <ref type="bibr" target="#b31">[32]</ref>. To understand the factors influencing the university learning process in virtual learning environments, Rivas et al. 
<ref type="bibr" target="#b32">[33]</ref> applied several automatic learning methods, including tree-like models and various types of artificial neural networks, to publicly available datasets.</p><p>The availability of educational data supported by learning platforms <ref type="bibr" target="#b33">[34]</ref> provides opportunities to study student behaviour and solve problems in higher education, optimise the educational environment, and ensure decision-making using artificial neural networks <ref type="bibr" target="#b34">[35]</ref>.</p><p>Cader <ref type="bibr" target="#b35">[36]</ref> uses a deep neural network to assess students' acquisition of knowledge and skills, noting that the relatively small amount of available assessment data required for neural network training is an obstacle to the application of the method in teaching. A new data augmentation method -asynchronous data augmentation through pre-categorisation -is proposed to address this problem, enabling neural network training even for small datasets. Do and Chen <ref type="bibr" target="#b36">[37]</ref> present a neuro-fuzzy classifier that uses the results of previous exams and other related factors as input variables to classify students based on their expected learning outcomes. The results showed that the proposed approach achieved high accuracy compared to other known classification approaches, such as Naive Bayes and neural networks.</p><p>Fazlollahtabar and Mahdavi <ref type="bibr" target="#b37">[38]</ref> proposed a neuro-fuzzy approach based on evolutionary techniques to obtain the optimal learning pathway for both teachers and students. 
The neuro-fuzzy approach provides recommendations to teachers for making pedagogical decisions based on students' learning styles, while the neural network approach is used for students to create personalised curriculum profiles based on their individual needs in a fuzzy environment.</p><p>Taylan and Karagözoğlu <ref type="bibr" target="#b38">[39]</ref> use a systematic approach to designing a fuzzy inference system based on a class of neural networks to assess student achievement. The developed method uses a fuzzy system, supplemented by neural networks, to enhance characteristics such as flexibility, speed, and adaptability, referred to as the adaptive neuro-fuzzy inference system (ANFIS). The results of the ANFIS model are as reliable as statistical methods but encourage a more natural way of interpreting student learning outcomes.</p><p>In comparison with these works, this study fills a gap in the methods of comprehensive assessment of educational programme quality and institutional performance based on a neuro-fuzzy approach.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.2.">Methods</head><p>This study employed methods of mathematical modelling and computational experimentation based on the statistical processing of data assessments of educational programme quality and institutional performance. The essence of the mathematical modelling methodology is to replace the original object with its mathematical model and study it using computer technology. The processing, analysis, and interpretation of calculation results were carried out through constant comparison with the results of statistical processing of expert estimates. Throughout the research, refinements were made, the mathematical model was revised, and the cycle of the computational experiment was repeated.</p><p>The methodology for assessing the quality of the curriculum and educational activities is built using artificial intelligence methods and tools, implemented in the Fuzzy Logic Toolbox of the MATLAB system in the form of an adaptive neuro-fuzzy inference system (ANFIS).</p><p>A fuzzy inference system can be represented as a neuro-fuzzy network -a special type of direct signal propagation neural network, or ANFIS model. The architecture of a neuro-fuzzy network is isomorphic to a fuzzy knowledge base. Neuro-fuzzy networks use differentiated implementations of triangular norms (multiplication and probabilistic OR) and smooth membership functions. This enables the use of fast training algorithms for neural networks based on the backpropagation method to tune neuro-fuzzy networks.</p><p>ANFIS implements the Sugeno fuzzy inference system through a five-layer feed-forward neural network. 
The purpose of each network layer is as follows:</p><p>• First layer -terms of input variables;</p><p>• Second layer -antecedents (parcels) of fuzzy rules;</p><p>• Third layer -normalisation of the degree of implementation of the rules;</p><p>• Fourth layer -conclusion of the rules;</p><p>• Fifth layer -aggregation of the result obtained according to different rules.</p><p>The network inputs are not allocated to a separate layer. Figure <ref type="figure" target="#fig_0">1</ref> shows an ANFIS network with two input variables (𝑥 1 and 𝑥 2 ) and four fuzzy rules. Three terms are used for the linguistic evaluation of the input variable 𝑥 1 , and two terms for the variable 𝑥 2 . We will use the following notation:</p><formula xml:id="formula_0">• 𝑥 1 , 𝑥 2 , ..., 𝑥 𝑛 -network inputs; • 𝑦 -network output; • 𝑅 𝑟 : if 𝑥 1 = 𝑎 1,𝑟 , ..., 𝑥 𝑛 = 𝑎 𝑛,𝑟 then 𝑦 = 𝑏 0,𝑟 + 𝑏 1,𝑟 𝑥 1 + ... + 𝑏 𝑛,𝑟 𝑥 𝑛 is a fuzzy rule with a serial number 𝑟; • 𝑚 -number of rules 𝑟 = 1, 𝑚,</formula><p>• 𝑎 𝑖,𝑟 -fuzzy term with a membership function 𝜇 𝑟 (𝑥 𝑖 ) used for linguistic evaluation of a variable 𝑥 𝑖 in the 𝑟-th rule (𝑟 = 1, 𝑚, 𝑖 = 1, 𝑛); • 𝑏 𝑞,𝑟 are the conclusion coefficients of the 𝑟-th rule (𝑟 = 1, 𝑚, 𝑞 = 0, 𝑛).</p><p>The ANFIS network operates as follows.</p><p>Layer 1. Each node of the first layer represents one term with a bell membership function. The network inputs are connected only to their terms. The number of nodes in the first layer is equal to the sum of the cardinalities of the term set of input variables. The degree of belonging of the value of the input variable to the corresponding fuzzy term is fed to the output of the node:</p><formula xml:id="formula_1">𝜇 𝑟 (𝑥 𝑖 ) = 1 / (1 + |(𝑥 𝑖 − 𝑐)/𝑎| ^ (2𝑏)),<label>(1)</label></formula><p>where 𝑎, 𝑏 and 𝑐 are membership function parameters that can be tuned. Layer 2. The number of nodes in the second layer is 𝑚. Each node of this layer corresponds to one fuzzy rule. 
The node of the second layer is connected to the nodes of the first layer, which form the antecedents of the corresponding rule. Therefore, each node of the second layer can receive from 1 to 𝑛 signals. The output of the node is the degree of execution of the rule, calculated as the product of the input signals. Let us denote the outputs of the nodes of this layer as 𝜏 𝑟 , 𝑟 = 1, 𝑚. Layer 3. The number of nodes in the third layer is also 𝑚. Each node of this layer calculates the relative level of execution of the fuzzy rule according to the formula:</p><formula xml:id="formula_2">𝜏 * 𝑟 = 𝜏 𝑟 𝑚 ∑︀ 𝑗=1 𝜏 𝑗 .<label>(2)</label></formula><p>Layer 4. The number of nodes in the fourth layer is also 𝑚. Each node is connected to one node of the third layer, as well as to all inputs of the network (figure <ref type="figure" target="#fig_0">1</ref> connections to the inputs are not shown). The node of the fourth layer calculates the contribution of one fuzzy rule to the network output by the formula:</p><formula xml:id="formula_3">𝑦 𝑟 = 𝜏 * 𝑟 (𝑏 0,𝑟 + 𝑏 1,𝑟 𝑥 1 + ... + 𝑏 𝑛,𝑟 𝑥 𝑛 ).<label>(3)</label></formula><p>Layer 5. A single node of this layer sums up the contributions of all rules:</p><formula xml:id="formula_4">𝑦 = 𝑦 1 + ... + 𝑦 𝑟 + ... + 𝑦 𝑚 .<label>(4)</label></formula><p>Typical neural network training procedures can be applied to tune an ANFIS network, as it uses only differentiated features. A combination of gradient descent as a backpropagation algorithm and the least-squares method is commonly used. The error backpropagation algorithm regulates the parameters of rule antecedents, i.e., membership functions. 
The least-squares method evaluates the rule inference coefficients since they are linearly related to the network output.</p><p>Each iteration of the tuning procedure is performed in two steps.</p><p>In the first stage, a training sample is fed to the inputs, and based on the discrepancy between the desired and actual behaviour of the network, the optimal parameters of the nodes of the fourth layer are determined using the least-squares method.</p><p>In the second stage, the residual mismatch is transmitted from the network output to the inputs, and the parameters of the nodes of the first layer are modified by the backpropagation of the error. At the same time, the rule inference coefficients found at the previous stage do not change. The iterative tuning procedure continues as long as the mismatch exceeds a predetermined value. To tune the membership functions, in addition to the error backpropagation method, other optimisation algorithms can be used, such as the Levenberg-Marquardt method.</p><p>The ANFIS editor in MATLAB allows the automatic synthesis of a neuro-fuzzy network from experimental data. In this case, the accessories of the synthesised systems are tuned (trained) in such a way as to minimise the deviations between the results of fuzzy modelling and experimental data. The ANFIS editor is loaded using the anfisedit command.</p><p>The ANFIS editor contains 3 top menus -File, Edit and View, a visualisation area, ANFIS properties area, data loading area, source fuzzy inference system generation area, training area, testing area, current information output area, as well as Help and Close buttons, which allow calling the help window and closing the ANFIS editor, respectively.</p><p>Participants in the experiment were 22 full-time master's students and 32 graduates of higher education institutions of the previous term studying the same specialities -a total of 54 people. 
This number of respondents is due to the number of indicators of quality criteria because the data format of the artificial network in MATLAB supports square matrices, in this case, 54x54. Before the accreditation examination, students were offered questionnaires with a proposal to assess the quality of the educational programme and educational activities of the speciality on an assessment scale covering four levels: F, E, B, A. Student assessments were used to form the vector of artificial neural network inputs. After the accreditation examination, the expert assessments were used to check the quality of the prediction of the artificial neural network.</p><p>The experience of European countries demonstrates the expediency of involving students in accreditation examinations. For example, the Polish Accreditation Commission consists of 80-90 members appointed by the Minister of Science and Higher Education among the candidates nominated by the Senates of higher education institutions, the conferences of rectors of scientific schools and universities in Poland, and the Parliament of Students of Poland (the President of the Student Parliament is a member of the Polish Accreditation Commission). In Slovakia, the Academic Ranking and Rating Agency is a civic association founded in 2004 on the initiative of former student leaders and academics. The Slovenian Quality Assurance Agency for Higher Education SQAA-NAKVIS appoints at least three members of each expert group, of which at least one foreign expert, an expert in the field of quality assessment of higher education, and one representative from among students <ref type="bibr" target="#b39">[40]</ref>.</p><p>To ensure the representativeness of the sample, the study of its design was carried out based on randomisation. 
The decision on the statistical rejection of the null hypothesis regarding the differences between the averages was also associated with the procedure of random sampling.</p><p>The rating scale covers four levels of compliance with the requirements of the legislation (F, E, B, A) <ref type="bibr" target="#b40">[41]</ref>. The legislation also establishes 10 criteria for assessing the quality of the educational programme <ref type="bibr" target="#b40">[41]</ref>:</p><p>1) Design and objectives of the educational programme (4); 2) Structure and content of the educational programme (9); 3) Access to the educational programme and recognition of learning outcomes (4); 4) Teaching and learning according to the educational programme (5); 5) Control measures, evaluation of applicants for higher education and academic integrity (4); 6) Human resources (6); 7) Educational environment and material resources (6); 8) Internal quality assurance of the educational programme (7); 9) Transparency and publicity (3); 10) Learning through research (6).</p><p>In turn, each of these criteria has from 3 to 9 indicators (the number is indicated in parentheses). Together, all 10 criteria contain 54 indicators.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Results</head><p>In the first stage of the study, data on the results of the assessment of students and graduates of higher education regarding educational programmes and educational activities for each criterion were collected and statistically processed.</p><p>In the second stage, a computational experiment was performed. The cycle of the computational experiment was carried out in several stages: 1) Choice of approximation and mathematical formulation of the problem (construction of a mathematical model of the phenomenon under study); 2) Development of a computational algorithm for solving the problem; 3) Implementation of the algorithm in the form of a PC program; 4) Calculations on the PC; 5) Processing, analysis and interpretation of calculation results, comparison with the results of statistical processing of expert estimates and, if necessary, refinement or revision of the mathematical model, i.e., return to the first stage and repeat the cycle of the computational experiment.</p><p>Assessing the quality of the curriculum and learning activities is complicated by the fact that each of the 10 criteria, in turn, consists of several indicators <ref type="bibr" target="#b2">(3)</ref><ref type="bibr" target="#b3">(4)</ref><ref type="bibr" target="#b4">(5)</ref><ref type="bibr" target="#b5">(6)</ref><ref type="bibr" target="#b6">(7)</ref><ref type="bibr" target="#b7">(8)</ref><ref type="bibr" target="#b8">(9)</ref> and is due to many factors, possibly with an unknown nature of influence, which is also non-numerical. 
To assess the quality of the curriculum and training activities, a two-tier system based on the ANFIS package and artificial neural networks is proposed to predict assessment scores.</p><p>The ANFIS hybrid system is a combination of the Sugeno neuro-fuzzy inference method with the ability to train a five-layer artificial neural network (ANN) of direct propagation with a single output and multiple inputs, which are fuzzy linguistic variables. As input variables of the ANFIS system, we use the criteria for evaluating the quality of the educational programme of 10 groups of factors 𝑉 𝑖 (𝑖 = 1, ..., 10).</p><p>The output variable of the ANFIS system is a numerical assessment of the quality of the curriculum and training activities and is defined as a function 𝑦 = 𝑓 (𝑉 1 , 𝑉 2 , 𝑉 3 , 𝑉 4 , 𝑉 5 , 𝑉 6 , 𝑉 7 , 𝑉 8 , 𝑉 9 , 𝑉 10 ).</p><p>Layer 1 of the ANFIS system for the linguistic evaluation of input parameters uses the term set of all possible values of the linguistic variable. 𝐴 𝑉 𝑖 = {"𝐹 ", "𝐸", "𝐵", "𝐴"}. In symbolic form we write: 𝐴 𝑉 𝑖 = {𝐹 &lt; 𝑖 &gt;, 𝐸 &lt; 𝑖 &gt;, 𝐵 &lt; 𝑖 &gt;, 𝐴 &lt; 𝑖 &gt;}. The term set of the original linguistic variable y is the set of values of quality assessments of the curriculum and educational activities: 𝑇 𝑦 = {𝐹, 𝐸, 𝐵, 𝐴}. The outputs of the nodes of layer 1 are the values of the membership functions at specific values of the input variables.</p><p>Layer 2 is non-adaptive and defines the preconditions of fuzzy production rules. Production rules are a form of representing human knowledge as sentences of the type: if (condition), then (action). The rules provide a formal way to present recommendations, guidance, or strategies. They are ideal in cases where the knowledge of the subject area arises from the empirical associations accumulated during the work on solving problems in a particular field.</p><p>Each node of this layer is connected to those nodes of layer 1, which form the prerequisites of the corresponding rule. 
To solve this problem, four fuzzy production rules are formulated: 𝑃 = {𝑝 1 , 𝑝 2 , 𝑝 3 , 𝑝 4 }, because according to the features of the ANFIS network, the number of network rules must correspond to the dimension of the term set of the source variable 𝑦.</p><p>Nodes perform a fuzzy logical operation "I" (min). The outputs of the nodes of this layer are the degree of truth (fulfilment) of the preconditions of each of the four fuzzy production rules, which are calculated by the formulas:</p><formula xml:id="formula_5">⎧ ⎪ ⎪ ⎨ ⎪ ⎪ ⎩ 𝑤 1 = min(𝜇 𝐹 1 (𝑉 1 ), 𝜇 𝐹 2 (𝑉 2 ), 𝜇 𝐹 3 (𝑉 3 ), 𝜇 𝐹 4 (𝑉 4 )) 𝑤 2 = min(𝜇 𝐸1 (𝑉 1 ), 𝜇 𝐸2 (𝑉 2 ), 𝜇 𝐸3 (𝑉 3 ), 𝜇 𝐸4 (𝑉 4 )) 𝑤 3 = min(𝜇 𝐵1 (𝑉 1 ), 𝜇 𝐵2 (𝑉 2 ), 𝜇 𝐵3 (𝑉 3 ), 𝜇 𝐵4 (𝑉 4 )) 𝑤 4 = min(𝜇 𝐴1 (𝑉 1 ), 𝜇 𝐴2 (𝑉 2 ), 𝜇 𝐴3 (𝑉 3 ), 𝜇 𝐴4 (𝑉 4 ))</formula><p>.</p><p>(</p><formula xml:id="formula_6">)<label>5</label></formula><p>Layer 3 normalises the degree of implementation of each of the fuzzy production rules (calculation of the relative degree of implementation of the rules) as follows:</p><formula xml:id="formula_7">𝑤 ℎ = 𝑤 ℎ ℎ ∑︁ 𝑖=1 𝑤 𝑖 ,<label>(6)</label></formula><p>where ℎ = 1, ..., 4 is the production rule number. Layer 4 calculates the contribution of each fuzzy production rule to the output of the network according to the formula.</p><formula xml:id="formula_8">𝑦 ℎ (𝑣, 𝑉 ) = 𝑤 ℎ (𝑣 (0) ℎ + 𝑣<label>(1)</label></formula><p>ℎ 𝑉 1 + 𝑣</p><p>ℎ 𝑉 2 + 𝑣</p><p>(3)</p><formula xml:id="formula_10">ℎ 𝑉 3 + 𝑣 (4) ℎ 𝑉 4 + 𝑣 (5) ℎ 𝑉 5 ),<label>(7)</label></formula><p>where 𝑣</p><p>ℎ -coefficients of the initial function (𝑖 = 0, ..., 5). Layer 5 summarises the contributions of all the rules:</p><formula xml:id="formula_12">𝑦 = 4 ∑︁ 𝑖=1 𝑦 𝑖 .<label>(8)</label></formula><p>Training of the ANFIS network was carried out for 24 epochs by a hybrid method. During training, the type of membership functions, the type of initial function, and their coefficients are selected. 
As a result of training a fuzzy network for four rules, Gaussian functions were adopted as membership functions, and a linear function was adopted as the initial function. As a result of training, membership functions and their coefficients were also obtained.</p><p>To assess each of the 10 groups of factors that affect the quality of the curriculum and educational activities by the evaluation criteria, 10 modules are used, which are implemented using artificial neural networks. Thus, it is necessary to design neural networks, a mathematical model of a comprehensive assessment of the quality of the educational program and educational activities based on the methods of the neuro-fuzzy approach. For this purpose, the Neural Network Toolbox was used. To form neural networks, it is necessary to determine their topology, learning mechanism, and testing procedure. Also, the training of an artificial neural network requires input data -a sample of answers of students and graduates with reliable quality indicators, determined based on these criteria.</p><p>A standard 𝐿-layer feedforward neural network consists of a layer of input nodes (we will stick to the position that it is not contained in the network as an independent layer), (𝐿 − 1) hidden layers, and an output layer that is connected in series in the forward direction and does not contain a connection between elements within a layer and feedback between layers. The most popular class of multilayer feed-forward networks is formed by multilayer perceptrons, where each computational element uses a limit or sigmoidal activation function. A multilayer perceptron can form arbitrarily complex decision limits and implement arbitrary Boolean functions. The development of a backpropagation algorithm for determining weights in a multilayer perceptron has made these networks the most popular among researchers and users of neural networks. The vast majority of programs involve the use of such multilayer perceptrons. 
Networks consisting of successive layers of neurons are more commonly used. Although any network without feedback can be represented as successive layers, the presence of many neurons in each layer can significantly speed up calculations using matrix accelerators.</p><p>The popularity of perceptrons is due to a wide range of available tasks that can be solved with their help. In the general case, they solve the problem of approximating multidimensional functions, that is, constructing a multidimensional mapping 𝐹 : 𝑥 ⇒ 𝑦 that generalizes a given set of parameters {𝑥 𝛼 ⇒ 𝑦 𝛼 }.</p><p>Depending on the type of output variables (the type of input variables is not critical), the approximation of functions can take the form of classification (discrete set of initial values), or regression (continuous initial values).</p><p>Many practical problems of pattern recognition, noise filtering, time series prediction, etc. come down to basic settings. The reason for the popularity of perceptrons is that, for their range of tasks, they are, firstly, universal, and secondly, they are efficient in terms of the computational complexity of devices.</p><p>As a result of the development of neurocomputing, a large number of efficient models of neural networks have been created, focused on solving various problems. Due to this, artificial neural networks are successfully used to solve a wide class of practical problems. Therefore, when solving a specific problem, it is necessary to solve the issue of choosing the most appropriate neural network model, its parameters, and the training method.</p><p>Typically, a network consists of many sensor elements (input nodes or source nodes) that form an input layer; one or more hidden layers of computational neurons, and one output layer of neurons. The input signal propagates through the network in a forward direction from layer to layer. Such networks are usually called multilayer perceptrons. 
They are a generalization of a single-layer perceptron.</p><p>Multilayer perceptrons are successfully used to solve various problems. At the same time, supervised learning is performed using such a popular algorithm as the error back-propagation algorithm. This method consists of error correction (error-correction learning rule). It can be thought of as a generalization of the equally popular adaptive filtering algorithm, the mean squared error minimization (LMS) algorithm.</p><p>Multilayer perceptrons have three characteristic features.</p><p>1. Each neuron of the network has a non-linear activation function. It should be noted that this non-linear function is smooth (that is, differentiable everywhere), in contrast to the hard threshold function used in the Rosenblatt perceptron. The most popular form of a function that satisfies this requirement is the sigmoidal nonlinearity, defined by the logistic function</p><formula xml:id="formula_13">𝑦 𝑗 = 1 / (1 + exp(−𝑣 𝑗 )) ,<label>(9)</label></formula><p>where 𝑣 𝑗 is the induced local field (i.e., the weighted sum of all synaptic inputs plus the bias value) of neuron 𝑗; 𝑦 𝑗 is the output of the neuron. The presence of non-linearity plays a very important role, since otherwise the "input-output" mapping of the network can be reduced to a conventional single-layer perceptron. Moreover, the use of the logistic function is biologically motivated, since it takes into account the recovery phase of a real neuron. 2. The network contains one or more layers of hidden neurons that are not part of the input or output of the network. These neurons allow the network to learn how to solve complex problems by sequentially extracting the most important features of the input image (vector).</p><p>The network has a high degree of connectivity, implemented using synaptic connections. 
Changing the level of network connectivity requires changing the plurality of synaptic connections or their weights.</p><p>The combination of all these properties, along with learning-by-doing, provides the computational power of a multilayer perceptron. However, these same qualities are the reason for the incompleteness of modern knowledge about the behaviour of such networks. First, the distributed form of nonlinearity and the high connectivity of the network significantly complicate the theoretical analysis of a multilayer perceptron. Second, the presence of hidden neurons makes the learning process more difficult to visualise. It is in the learning process that it is necessary to determine which signs of the input signal should be given by hidden neurons. Then the learning process becomes even more difficult, since the search must be performed in a wide range of possible functions, and the choice must be made among alternative representations of the input images.</p><p>The emergence of the backpropagation algorithm was a landmark event in the development of neural networks, since it implements a computationally efficient method for training a multilayer perceptron. The backpropagation algorithm does not offer a truly optimal solution to all potential problems, but it is most effective in learning multilayer machines.</p><p>An artificial neural network for the analysis of indicators of the quality of the educational program and educational activities will have the number of input neurons (according to the number of indicators for all criteria) 54; output neurons -54. Input signals were determined based on students' assessments of each indicator of this quality criterion, while the scale F, E, B, A were translated into numerical 1; 2; 3; 4 respectively. Part of the data is given in table <ref type="table">1</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Table 1</head><p>Input signals (T) based on students' assessments of quality criteria.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Indicators of</head><p>Student grades quality criteria 1 2 3 4 5 6 7 ... It is important that the neural network can predict expert assessments if student and graduate assessments are to be ranked in ascending order based on the determination of the grade point average. According to the hypothesis, we assume that students with higher academic performance are better acquainted with the goals, structure, and content of the educational program, the process and characteristics of teaching and learning according to the educational program, control measures, assessment system, and all other aspects of educational activities. assessments of the quality of the educational program and educational activities will be more objective.</p><p>The Neural Network Toolbox application package Matlab Mathematical Modeling Environment (version R2014a) was used in the work. After starting the Matlab system, enter the nntool command on the command line, which opens the window for entering data and creating a neural network (Neural Network / Data Manager) (figure <ref type="figure" target="#fig_2">2</ref>). To create a new network, we chose New, to view the data you need to select Import. The data is contained in the P.mat file. This file is a matrix of two lines, in which the numbers 1-54 are indicators of quality criteria, and 55-108 -are the evaluation of students and graduates on the indicators of quality criteria. Its contents are stored in the P.txt file. The next step is to import the data (figure <ref type="figure" target="#fig_3">3</ref>). The next step was to create data ("T") -goals, which are an array of size 54x54, which contains information about the grades given by the participants of the experiment -full-time master's students (22 people) and graduates of higher education institutions there are specialties (32 people) -a total of 54 people. 
This number of respondents is due to the number of indicators of quality criteria because the data format of the artificial network in Matlab supports square matrices, in this case, 54x54. The data is stored in a T.mat file. Its contents can be viewed using a text editor.</p><p>We import data in the same way as for the array P.</p><p>In the next step, a neural network was created (figure <ref type="figure" target="#fig_4">4</ref>). An artificial neural network for the analysis of indicators of the quality of the educational program and educational activities will have the number of input neurons (according to the number of indicators for all criteria) 54; output neurons -54. Input signals were determined based on students' assessments for each indicator of this quality criterion, while the scales F, E, B, A were converted to numerical 1; 2; 3; 4 respectively.</p><p>The configuration of the neural network of direct propagation is chosen based on a heuristic rule: the number of neurons of the hidden layer is equal to half of the total number of input and output neurons. The artificial neural network for the analysis of quality indicators of the educational program and educational activity will have the number of input neurons 2 (according to the dimensionality of the data -indicators of quality criteria and student evaluation); source neurons 54, therefore, the number of hidden neurons is 28. The View button allows you to view the network structure (figure <ref type="figure" target="#fig_5">5</ref>).</p><p>In our case, 2 is the number of input neurons, which is known to be selected based on the dimension of the input data (1 -indicators of quality criteria; 2 -student assessments). Output neurons -54. 
The configuration of the neural network of direct propagation (feed-forward backprop) is chosen based on the heuristic rule: the number of neurons in the hidden layer is equal to half the total number of input and output neurons, so the hidden layer has 28 neurons.</p><p>The next stage is network training. Double-clicking with the left mouse button on the created neural network network1 in the window of the Neural Network / Data Manager opens a window with the network.</p><p>The View tab presents the neural network itself. Go to the Reinitialize Weights tab, where the Input Ranges column selects the P input from the Get from the input list. Then press the Set Input Ranges and Initialize Weights buttons in succession, allowing us to initialize the weights needed to initialize the entire network.</p><p>The next step is network learning. Learning by the backpropagation method involves two passes through all layers of the network: forward and backward. In a forward pass, the image (incoming vector) is fed to the sensor nodes of the network, after which it propagates through the network from layer to layer. As a result, a set of output signals is generated, which is the actual response of the network to a given input image. In forward traversal, all synaptic weights of the network are fixed. In a backward pass, all synaptic weights are adjusted according to the error correction rule, namely: the actual output of the network is subtracted from the desired (target) response, resulting in an error signal. This signal subsequently propagates through the network in the opposite direction of the synaptic connections. Hence the name -backpropagation algorithm. The synaptic weights are tuned to bring the network output as close as possible to the desired value in the statistical sense. The back-propagation algorithm is sometimes referred to as the simplified back-propagation algorithm. 
The learning process using this algorithm is called back-propagation learning.</p><p>Going to the Train tab opens a learning window in which P and T are selected instead of input data and targets, respectively (figure <ref type="figure" target="#fig_7">6</ref>  You can calculate that the average network error is 0.0321, which indicates the efficiency of the system.</p><p>After learning the network, you can proceed to data forecasting. Returning to the Neural Network / Next, you need to return to the Network window. In the Simulate tab of the input values house, the P1 array is selected, and the Outputs output value is renamed to forecast (figure <ref type="figure" target="#fig_8">7</ref>). After clicking the Simulate Network button, you can return to the Neural Network / Data Manager window and, by clicking the Export button, copy the source forecast array to the Matlab workspace. After receiving the table in the workspace, pay attention to the last column, which is responsible for forecasting (figure <ref type="figure" target="#fig_9">8</ref>).</p><p>The data obtained in the study can be viewed in the forecast.mat file.</p><p>Comparing the data issued by the system and the real data, we can see that the neural network does make predictions that are quite close to reality. Compared with expert estimates, the average absolute  error is 0.0321, the relative error is 7.08%.</p><p>In the second part of the experiment, forecasting was carried out using a different type of neural network -a neuro-fuzzy network, or ANFIS-model.</p><p>Expert estimates are used as validation data. Create data files: training.dat, testing.dat, checking.dat. It should be noted that attempts to consider large data volumes lead to a reduction in the number of observations in the training sample and its simultaneous unjustified growth, which can negatively affect the network's ability to learn. 
So, first you need to turn the available information into a form that is understandable and meaningful for the neuro-fuzzy network. Consider the average value of the assessment of each of the 10 criteria for assessing the quality of the educational program. For training, we use the average scores of all students for each of the 10 criteria. For testing, the marks of students numbered from 12 to 30 are used, for verification -the marks that were put by 31 students.</p><p>We preliminarily transpose the data, so the numbers of students will be in the rows, and the grades according to the quality criteria will be in the columns. The data in the files contains 10 columns -9 grades (incoming) and 1 grade (source). The first file contains 54 lines and 10 columns. The second has 18 rows and 10 columns. The third has one row and 10 columns. Anfis Editor is used to building MATLAB fuzzy neural networks. Run the editor with the anfisedit command. In the Load data menu, select Training, and From disk, click the load data button. In the window that opens, select the previously created training.dat file. In the Load data menu, select Testing and From disk, click the load data button. In the window that opens, select the previously created testing.dat. In the Load data menu, select Checking and From disk, and click the load data button. In the window that opens, select the previously created checking.dat. The visualization area contains two types of information: when training the system, the learning curve in the form of a graph of the dependence of the learning error on the iteration ordinal number; when loading data and testing the system -experimental data and simulation results.</p><p>Experimental data and simulation results are displayed as a set of points in two-dimensional space. 
In this case, the serial number of the data line in the sample (training, test, or control) is plotted along the abscissa axis, and the value of the initial variable of this sample line is plotted along the ordinate axis. The following markers are used: blue dot (.) -test set; blue circle (o) -training sample; blue plus (+) -control sample; a red asterisk (*) -simulation results.</p><p>Then, having set the Generate FIS menu switch to the Grid partition position, you should press the Generate FIS button. In this case, the model has 10 input variables, each of which corresponds to 9 terms of the gaussmf type. The original variable is determined by a linear function. Let's generate a Sugeno-type fuzzy inference system by pressing the Generate FIS button. In the window that opens, set 3 membership functions of the gaussmf type for each input variable. The membership function is chosen here because we assume a normal distribution for a random variable, defined by a Gaussian function according to probability theory. For the output variable, we set the membership function const.</p><p>To train the hybrid network, we will choose the backprop method (error backpropagation) with an error level of 0 and a number of cycles of 10. Let's start training the hybrid network (figure <ref type="figure" target="#fig_12">10</ref>).</p><p>As can be seen from figure <ref type="figure" target="#fig_12">10</ref>, according to the training results, the average error is approximately 0.007.</p><p>We test the fuzzy inference system first on the training set. Now let's test the resulting fuzzy inference system on the known values of expert estimates. Now we load this sample in testing mode in the Anfis editor. The results are shown in Figure <ref type="figure" target="#fig_14">12</ref>. The mean score of the experts is 3.99; network prediction of the neural fuzzy network is 3.51. 
The relative forecast error is 12.57%.</p><p>Comparing the prediction errors of the neuro-fuzzy network (12.57%) and the L-layer feed-forward    Table <ref type="table" target="#tab_2">4</ref> also shows that the quality of the program and educational activities is at a fairly high level, which reflects the average score of the peer review.</p><p>102-124</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Discussion</head><p>The study aimed to demonstrate that the challenge of predicting the assessment of educational programme quality and institutional performance can be adequately addressed through an artificial neural network, obtaining a comprehensive evaluation based on a neuro-fuzzy approach. The mathematical model involves the use of neural networks and is based on the technology of analytical processing of statistical data. Standard methods of mathematical statistics are used to analyse the estimates received from respondents.</p><p>The proposals for using students as experts in evaluating educational programmes and institutional activities are debatable; it would be more appropriate to involve teachers from other educational institutions. However, in the process of preparing for self-assessment, this approach can be considered quite suitable.</p><p>The results of the neural network should be considered not as final, but as a test. As noted, for more detailed conclusions, it is necessary to train the network on a larger amount of experimental data.</p><p>The network structure has room for further improvement and customisation in future studies.</p><p>The assumption that a sample of students and graduates can prepare a dataset for setting up and teaching an artificial neural network to evaluate the quality of educational programmes and activities is confirmed by ordering the quality assessments of students and graduates in ascending order of their grade point average. In practice, this allows predicting the results and identifying existing shortcomings to eliminate them before the accreditation examination. However, the difficulty of this method lies in choosing the architecture of the neural network and preparing a training sample to configure it. 
In particular, future plans include increasing the volume of the input vector of the artificial neural network, with the form based on estimates of teachers, stakeholders, and experts.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Conclusions</head><p>As a result of developing a mathematical model for the comprehensive evaluation of educational programme quality and institutional performance based on neuro-fuzzy approaches, we have managed to achieve two key outcomes. Firstly, we have devised a mechanism for obtaining a quantitative assessment of educational programmes and activities that will enable higher education institutions to detect shortcomings and potential problems, and address them prior to accreditation examinations. Secondly, we have demonstrated that a sample of students and graduates can be used to prepare a training dataset for configuring and training an artificial neural network capable of adequately performing a comprehensive assessment of educational programmes and institutional activities. This can be accomplished by arranging the assessments of programme quality and educational activities provided by students and graduates in ascending order based on their grade point average. It is emphasised that these methods are effective provided they adhere to the principles of student-centredness and academic freedom.</p><p>By preparing a training sample for setting up and teaching an artificial neural network based on a sample of students and graduates, we were able to evaluate the quality of educational programmes and activities. A comparison of the results produced by an artificial neural network of direct propagation with one output and several inputs with real data shows that the neural network generates predictions close to reality. 
Compared with expert estimates, the average absolute error was 0.0321, and the relative error was 7.08%.</p><p>The results of this study can be applied in the practice of higher education institutions to predict outcomes, identify existing shortcomings, and eliminate them before accreditation examinations.</p><p>We see prospects for further research in the application of software products based on neural network theory to automate the processes of organisation, control, and analysis of the educational process, as well as the introduction of neural network software for the direct training of students in certain disciplines.</p><p>Declaration on Generative AI: The authors have not employed any Generative AI tools.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: An example of an ANFIS network.</figDesc><graphic coords="5,128.41,571.44,338.47,110.53" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head></head><label></label><figDesc>. . . . . . . . . . . . . . . . . . . . . .</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Data entry and neural network creation windows.</figDesc><graphic coords="11,72.00,396.19,451.28,239.71" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: Importing data.</figDesc><graphic coords="12,72.00,131.34,451.27,179.77" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: Creating a neural network.</figDesc><graphic coords="13,128.41,65.60,338.47,340.94" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: The structure of the neural network.</figDesc><graphic coords="13,128.41,445.97,338.47,110.58" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head></head><label></label><figDesc>). On the right of the Training Results column, you need to change the name of the Outputs and Errors to O and E, respectively. Then pressing the Train Network button will start network training, the process of which can be observed in the Neural Network Training window. You can close the window after training is complete.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head>Figure 6 :</head><label>6</label><figDesc>Figure 6: Neural network learning.</figDesc><graphic coords="14,128.41,144.44,338.47,478.76" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head>Figure 7 :</head><label>7</label><figDesc>Figure 7: Simulate.</figDesc><graphic coords="15,105.84,309.49,383.59,326.88" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_9"><head>Figure 8 :</head><label>8</label><figDesc>Figure 8: Getting a table with forecasting in the work area.</figDesc><graphic coords="16,72.00,65.60,451.27,240.02" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_10"><head></head><label></label><figDesc>Training.dat file (first three lines): 3.5000 3.3333 3.5000 3.4000 3.5000 3.3333 3.3333 3.5714 3.3333 3.1667 4.0000 3.4444 3.2500 3.8000 3.7500 3.5000 3.6667 3.4286 3.6667 4.0000 3.2500 3.6667 3.2500 3.4000 3.5000 3.6667 3.0000 3.5714 4.0000 3.8333Testing.dat file (first four lines): 3.2500 3.4444 3.7500 3.6000 3.2500 3.8333 3.5000 3.4286 3.6667 3.5000 3.7500 3.6667 4.0000 3.2000 3.5000 3.3333 3.1667 3.8571 3.6667 3.6667 4.0000 3.5556 3.5000 3.6000 3.5000 3.8333 3.5000 3.2857 3.3333 3.8333 3.5000 3.5556 3.2500 3.4000 3.5000 3.6667 3.6667 3.5714 4.0000 3.5000 Checking.dat file: 3.5000 3.3333 3.2500 3.4000 4.0000 3.3333 3.8333 3.2857 3.0000 3.8333</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_11"><head>Figure 9 :</head><label>9</label><figDesc>Figure 9: Data for network training and validation.</figDesc><graphic coords="18,72.00,65.60,451.27,240.02" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_12"><head>Figure 10 :</head><label>10</label><figDesc>Figure 10: Network training error.</figDesc><graphic coords="18,128.41,345.04,338.45,282.92" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_13"><head>Figure 11 :</head><label>11</label><figDesc>Figure 11: Network training results.</figDesc><graphic coords="19,128.41,65.61,338.47,284.97" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_14"><head>Figure 12 :</head><label>12</label><figDesc>Figure 12: The results of network testing on known values of expert estimates.</figDesc><graphic coords="19,128.41,389.99,338.46,283.60" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>Table 2</head><label>2</label><figDesc>Output signals (O). Data Manager window, you need to create additional input by clicking the New button. Going to the Data tab, the name of the data changes, for example, to P1, and the values are set as follows: values 1-54 still indicate the numbers of indicators of quality criteria of educational programs and educational activities, and 56-109 assessments of students and graduates quality, and the last column -projected expert assessments.</figDesc><table><row><cell>Indicators of</cell><cell></cell><cell></cell><cell></cell><cell cols="2">Student grades</cell><cell></cell><cell></cell><cell></cell><cell></cell></row><row><cell>quality criteria</cell><cell>1</cell><cell>2</cell><cell>3</cell><cell>4</cell><cell>5</cell><cell>6</cell><cell>7</cell><cell>...</cell><cell>54</cell></row><row><cell>1</cell><cell cols="9">3.1985 3.252 3.3058 3.3541 3.3933 3.4235 3.4475 . . . 3.9704</cell></row><row><cell>2</cell><cell cols="9">3.4521 3.3478 3.2644 3.2035 3.1633 3.1404 3.1319 . . . 3.9997</cell></row><row><cell>3</cell><cell cols="9">3.1516 3.1812 3.219 3.2627 3.3062 3.3417 3.3638 . . . 3.9992</cell></row><row><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell></row><row><cell>54</cell><cell>4</cell><cell cols="8">3.4192 3.3522 3.3128 3.291 3.2798 3.2756 . . . 3.9716</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head>Table 3</head><label>3</label><figDesc>Errors (E).</figDesc><table><row><cell>Indicators of</cell><cell></cell><cell></cell><cell></cell><cell cols="3">Student grades</cell><cell></cell><cell></cell><cell></cell></row><row><cell>quality criteria</cell><cell>1</cell><cell>2</cell><cell>3</cell><cell>4</cell><cell>5</cell><cell>6</cell><cell>7</cell><cell>...</cell><cell>54</cell></row><row><cell>1</cell><cell>-0.199</cell><cell>0.748</cell><cell cols="4">-0.306 -0.354 0.607 -0.424</cell><cell cols="3">0.552 . . . 0.0000237</cell></row><row><cell>2</cell><cell>0.548</cell><cell cols="8">-0.348 -0.264 -0.203 0.837 -0.140 -0.132 . . . 0.029607</cell></row><row><cell>3</cell><cell cols="2">-0.152 -0.181</cell><cell>0.781</cell><cell cols="2">-0.263 0.694</cell><cell>0.658</cell><cell cols="3">-0.364 . . . 0.00027</cell></row><row><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell></row><row><cell>54</cell><cell cols="9">0.58076 -0.3522 -0.3128 -0.2909 0.7202 -0.2755 -0.2769 . . . 0.028442</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head>Table 4</head><label>4</label><figDesc>Neural network forecast and expert evaluation.</figDesc><table><row><cell>Indicators of quality criteria</cell><cell cols="2">Forecast Estimates</cell></row><row><cell>1</cell><cell>3.999977</cell><cell>4</cell></row><row><cell>2</cell><cell>3.974844</cell><cell>4</cell></row><row><cell>3</cell><cell>3.999750</cell><cell>4</cell></row><row><cell>4</cell><cell>3.999379</cell><cell>3</cell></row><row><cell>5</cell><cell>3.956661</cell><cell>4</cell></row><row><cell>6</cell><cell>3.991731</cell><cell>4</cell></row><row><cell>7</cell><cell>3.985698</cell><cell>4</cell></row><row><cell>. . .</cell><cell>. . .</cell><cell>. . .</cell></row><row><cell>54</cell><cell>3.970182</cell><cell>4</cell></row></table></figure>
		</body>
		<back>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">Cocalc as a learning tool for neural network simulation in the special course &quot;foundations of mathematic informatics&quot;</title>
		<author>
			<persName><forename type="first">O</forename><forename type="middle">M</forename><surname>Markova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Popel</surname></persName>
		</author>
		<ptr target="https://ceur-ws.org/Vol-2104/paper_204.pdf" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 14th International Conference on ICT in Education, Research and Industrial Applications. Integration, Harmonization and Knowledge Transfer. Volume II: Workshops</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<editor>
			<persName><forename type="first">V</forename><surname>Ermolayev</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><forename type="middle">C</forename><surname>Suárez-Figueroa</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">V</forename><surname>Yakovyna</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">V</forename><forename type="middle">S</forename><surname>Kharchenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">V</forename><surname>Kobets</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">H</forename><surname>Kravtsov</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">V</forename><forename type="middle">S</forename><surname>Peschanenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Y</forename><surname>Prytula</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><forename type="middle">S</forename><surname>Nikitchenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Spivakovsky</surname></persName>
		</editor>
		<meeting>the 14th International Conference on ICT in Education, Research and Industrial Applications. Integration, Harmonization and Knowledge Transfer. Volume II: Workshops<address><addrLine>Kyiv, Ukraine</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2018">May 14-17, 2018. 2104. 2018</date>
			<biblScope unit="page" from="388" to="403" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Neural network analytics and forecasting the country&apos;s business climate in conditions of the coronavirus disease (COVID-19)</title>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Kucherova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Los</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Ocheretin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">CEUR Workshop Proceedings</title>
		<imprint>
			<biblScope unit="volume">2845</biblScope>
			<biblScope unit="page" from="22" to="32" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Road Sign Recognition Using Convolutional Neural Networks</title>
		<author>
			<persName><forename type="first">V</forename><surname>Mukovoz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Vakaliuk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-031-71804-5_12</idno>
	</analytic>
	<monogr>
		<title level="m">Information Technology for Education, Science, and Technics</title>
		<title level="s">Lecture Notes on Data Engineering and Communications Technologies</title>
		<editor>
			<persName><forename type="first">E</forename><surname>Faure</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Y</forename><surname>Tryus</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><surname>Vartiainen</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">O</forename><surname>Danchenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Bondarenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">C</forename><surname>Bazilo</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">G</forename><surname>Zaspa</surname></persName>
		</editor>
		<meeting><address><addrLine>Nature Switzerland; Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2024">2024</date>
			<biblScope unit="volume">222</biblScope>
			<biblScope unit="page" from="172" to="188" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Can you leave high school behind?</title>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">E</forename><surname>Black</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Lincove</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Cullinane</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Veron</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.econedurev.2015.02.003</idno>
	</analytic>
	<monogr>
		<title level="j">Economics of Education Review</title>
		<imprint>
			<biblScope unit="volume">46</biblScope>
			<biblScope unit="page" from="52" to="63" />
			<date type="published" when="2015">2015</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<monogr>
		<author>
			<persName><forename type="first">B</forename><surname>Wächter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kelo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Q</forename><surname>Lam</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Effertz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Jost</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Kottowski</surname></persName>
		</author>
		<idno type="DOI">10.2861/426164</idno>
		<title level="m">University quality indicators: a critical assessment</title>
				<imprint>
			<date type="published" when="2015">2015</date>
		</imprint>
		<respStmt>
			<orgName>Directorate General for Internal Policies, Policy Department</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">Technical Report</note>
	<note>B: Structural and Cohesion Policies</note>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Indicators of quality in teacher education: Looking at features of teacher education from an international perspective</title>
		<author>
			<persName><forename type="first">K</forename><surname>Hammerness</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Klette</surname></persName>
		</author>
		<idno type="DOI">10.1108/S1479-367920140000027013</idno>
	</analytic>
	<monogr>
		<title level="m">Promoting and sustaining a quality teacher workforce</title>
				<imprint>
			<publisher>Emerald Group Publishing Limited</publisher>
			<date type="published" when="2015">2015</date>
			<biblScope unit="volume">27</biblScope>
			<biblScope unit="page" from="239" to="277" />
		</imprint>
	</monogr>
	<note>International Perspectives on Education and Society</note>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">Graph analytical method for determining the complex quality indicator of qualimetry objects</title>
		<author>
			<persName><forename type="first">O</forename><surname>Cherniak</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Sorocolat</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Kanytska</surname></persName>
		</author>
		<idno type="DOI">10.30837/ITSSI.2020.14.169</idno>
	</analytic>
	<monogr>
		<title level="j">Innovative Technologies and Scientific Solutions for Industries</title>
		<imprint>
			<biblScope unit="volume">4</biblScope>
			<biblScope unit="page" from="169" to="175" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Mathematical model of measuring the quality of services of the higher education institutions</title>
		<author>
			<persName><forename type="first">I</forename><surname>Parvu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">M</forename><surname>Ipate</surname></persName>
		</author>
		<ptr target="v:2:y:2007:i:1(2)_fall2007:5" />
	</analytic>
	<monogr>
		<title level="j">Journal of Applied Economic Sciences</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Optimizing Franchising Investment Decision Using Electre and Rompedet Methods</title>
		<author>
			<persName><forename type="first">C</forename><surname>Isac</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Nita</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Dura</surname></persName>
		</author>
		<ptr target="https://www.iupindia.in/510/IJME_Optimizing_Franchising_Investment_Decision_7.html" />
	</analytic>
	<monogr>
		<title level="j">The IUP Journal of Managerial Economics</title>
		<imprint>
			<biblScope unit="volume">8</biblScope>
			<biblScope unit="page" from="7" to="32" />
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Bagatokryterialna optymizatsiia liniinykh system [Multicriteria optimization of linear systems</title>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">E</forename><surname>Kondruk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">M</forename><surname>Maliar</surname></persName>
		</author>
		<ptr target="https://dspace.uzhnu.edu.ua/jspui/handle/lib/24042" />
	</analytic>
	<monogr>
		<title level="m">Autdor-Shark</title>
				<meeting><address><addrLine>Uzhgorod, Ukraine</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">The Peculiarities of Multiplicative Coagulation a Partial Criteria Into One Generalized Index</title>
		<author>
			<persName><forename type="first">Y</forename><forename type="middle">I</forename><surname>Grytsyuk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">Y</forename><surname>Grytsiuk</surname></persName>
		</author>
		<ptr target="https://nv.nltu.edu.ua/Archive/2014/24_11/57.pdf" />
	</analytic>
	<monogr>
		<title level="j">Scientific Bulletin of UNFU</title>
		<imprint>
			<biblScope unit="volume">24</biblScope>
			<biblScope unit="page" from="341" to="352" />
			<date type="published" when="2014">2014</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Teoriya optymalnogo vyboru. Pidkryteriyi paretivskoyi zgortky kryteriyiv [The theory of optimal choice. Sub-criteria for convolution of Pareto criteria</title>
		<author>
			<persName><forename type="first">O</forename><forename type="middle">Y</forename><surname>Chervak</surname></persName>
		</author>
		<ptr target="https://dspace.uzhnu.edu.ua/jspui/handle/lib/7372" />
	</analytic>
	<monogr>
		<title level="j">Naukovyj visnyk Uzhgorodskogo universytetu</title>
		<imprint>
			<biblScope unit="volume">30</biblScope>
			<biblScope unit="page" from="28" to="30" />
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<monogr>
		<author>
			<persName><forename type="first">I</forename><surname>Goodfellow</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Bengio</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Courville</surname></persName>
		</author>
		<ptr target="http://www.deeplearningbook.org" />
		<title level="m">Deep Learning, Adaptive Computation and Machine Learning series</title>
				<imprint>
			<publisher>MIT Press</publisher>
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Neural Networks: An Introduction</title>
		<author>
			<persName><forename type="first">B</forename><surname>Müller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Reinhardt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">T</forename><surname>Strickland</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-642-57760-4</idno>
	</analytic>
	<monogr>
		<title level="j">Physics of Neural Networks</title>
		<imprint>
			<date type="published" when="1995">1995</date>
			<publisher>Springer-Verlag</publisher>
		</imprint>
	</monogr>
	<note>2 ed</note>
</biblStruct>

<biblStruct xml:id="b14">
	<monogr>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">N</forename><surname>Sivanandam</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Sumathi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">N</forename><surname>Deepa</surname></persName>
		</author>
		<title level="m">Introduction to neural networks using Matlab 6</title>
				<meeting><address><addrLine>New Delhi</addrLine></address></meeting>
		<imprint>
			<publisher>Tata McGraw-Hill Education</publisher>
			<date type="published" when="2006">2006</date>
			<biblScope unit="volume">0</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Proektirovanie nechetkikh sistem sredstvami MatLab [Fuzzy systems design by means of MatLab</title>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">D</forename><surname>Shtovba</surname></persName>
		</author>
		<ptr target="http://pistunovi.inf.ua/shtovba_proek_nechet_sistem__matlab.pdf" />
	</analytic>
	<monogr>
		<title level="m">Goryachaya Liniya-Telekom</title>
				<meeting><address><addrLine>Moscow</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Fuzzy Technology-Based Cause Detection of Structural Cracks of Stone Buildings</title>
		<author>
			<persName><forename type="first">S</forename><surname>Shtovba</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Pankevych</surname></persName>
		</author>
		<ptr target="https://ceur-ws.org/Vol-2105/10000209.pdf" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 14th International Conference on ICT in Education, Research and Industrial Applications. Integration, Harmonization and Knowledge Transfer. Volume I: Main Conference</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<editor>
			<persName><forename type="first">V</forename><surname>Ermolayev</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><forename type="middle">C</forename><surname>Suárez-Figueroa</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Lawrynowicz</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">R</forename><surname>Palma</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">V</forename><surname>Yakovyna</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">H</forename><forename type="middle">C</forename><surname>Mayr</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><forename type="middle">S</forename><surname>Nikitchenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Spivakovsky</surname></persName>
		</editor>
		<meeting>the 14th International Conference on ICT in Education, Research and Industrial Applications. Integration, Harmonization and Knowledge Transfer. Volume I: Main Conference<address><addrLine>Kyiv, Ukraine</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2018">May 14-17, 2018. 2105. 2018</date>
			<biblScope unit="page" from="209" to="218" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">Hazards and risks in assessing the impact of oil and gas companies on the environment</title>
		<author>
			<persName><forename type="first">I</forename><surname>Khvostina</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Oliinyk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Solovieva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Yatsenko</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Kohut-Ferens</surname></persName>
		</author>
		<idno type="DOI">10.1088/1755-1315/628/1/012027</idno>
	</analytic>
	<monogr>
		<title level="j">IOP Conference Series: Earth and Environmental Science</title>
		<imprint>
			<biblScope unit="volume">628</biblScope>
			<biblScope unit="page">12027</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Identifying stock market crashes by fuzzy measures of complexity</title>
		<author>
			<persName><forename type="first">A</forename><surname>Bielinskyi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Soloviev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Solovieva</surname></persName>
		</author>
		<idno type="DOI">10.33111/nfmte.2021.003</idno>
	</analytic>
	<monogr>
		<title level="j">Neuro-Fuzzy Modeling Techniques in Economics</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<biblScope unit="page" from="3" to="45" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">The cyber-physical system for increasing the efficiency of the iron ore desliming process</title>
		<author>
			<persName><forename type="first">V</forename><surname>Morkun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Morkun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Pikilnyak</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Serdiuk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Gaponenko</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">CEUR Workshop Proceedings</title>
		<imprint>
			<biblScope unit="volume">2853</biblScope>
			<biblScope unit="page" from="450" to="459" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Application of an Artificial Neural Network to Predict Graduation Success at the United States Military Academy</title>
		<author>
			<persName><forename type="first">G</forename><surname>Lesinski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Corns</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Dagli</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.procs.2016.09.348</idno>
	</analytic>
	<monogr>
		<title level="j">Procedia Computer Science</title>
		<imprint>
			<biblScope unit="volume">95</biblScope>
			<biblScope unit="page" from="375" to="382" />
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">A neural network approach for assessing quality in technical education: an empirical study</title>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">S</forename><surname>Mahapatra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">S</forename><surname>Khan</surname></persName>
		</author>
		<idno type="DOI">10.1504/IJPQM.2007.012451</idno>
	</analytic>
	<monogr>
		<title level="j">International Journal of Productivity and Quality Management</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="page" from="287" to="306" />
			<date type="published" when="2007">2007</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Using spreadsheets as learning tools for neural network simulation</title>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Teplytskyi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Yechkalo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Markova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Soloviev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Kiv</surname></persName>
		</author>
		<idno type="DOI">10.32919/uesit.2022.03.04</idno>
	</analytic>
	<monogr>
		<title level="j">Ukrainian Journal of Educational Studies and Information Technology</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<biblScope unit="page" from="42" to="68" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">Prediction of student course selection in online higher education institutes using neural network</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">A</forename><surname>Kardan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Sadeghi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">S</forename><surname>Ghidary</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">R F</forename><surname>Sani</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.compedu.2013.01.015</idno>
	</analytic>
	<monogr>
		<title level="j">Computers &amp; Education</title>
		<imprint>
			<biblScope unit="volume">65</biblScope>
			<biblScope unit="page" from="1" to="11" />
			<date type="published" when="2013">2013</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">A neural network students&apos; performance prediction model (NNSPPM)</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">M</forename><surname>Arsad</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Buniyamin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J.-L</forename><forename type="middle">A</forename><surname>Manan</surname></persName>
		</author>
		<idno type="DOI">10.1109/ICSIMA.2013.6717966</idno>
	</analytic>
	<monogr>
		<title level="m">IEEE International Conference on Smart Instrumentation, Measurement and Applications (ICSIMA)</title>
				<imprint>
			<date type="published" when="2013">2013</date>
			<biblScope unit="page" from="1" to="5" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Development of a software product for forecasting the entrance of applicants to higher educational institutions</title>
		<author>
			<persName><forename type="first">V</forename><surname>Osadchyi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Kruglyk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Bukreyev</surname></persName>
		</author>
		<idno type="DOI">10.32919/uesit.2018.03.06</idno>
	</analytic>
	<monogr>
		<title level="j">Ukrainian Journal of Educational Studies and Information Technology</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page" from="55" to="69" />
			<date type="published" when="2018">2018</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">A Neural Network Approach for Students&apos; Performance Prediction</title>
		<author>
			<persName><forename type="first">F</forename><surname>Okubo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Yamashita</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Shimada</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Ogata</surname></persName>
		</author>
		<idno type="DOI">10.1145/3027385.3029479</idno>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Seventh International Learning Analytics &amp; Knowledge Conference, LAK &apos;17</title>
				<meeting>the Seventh International Learning Analytics &amp; Knowledge Conference, LAK &apos;17<address><addrLine>New York, NY, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Association for Computing Machinery</publisher>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="598" to="599" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">Predicting student performance using artificial neural network: In the faculty of engineering and information technology</title>
		<author>
			<persName><forename type="first">S</forename><surname>Naser</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Zaqout</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ghosh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Atallah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Alajrami</surname></persName>
		</author>
		<idno type="DOI">10.14257/ijhit.2015.8.2.20</idno>
	</analytic>
	<monogr>
		<title level="j">International Journal of Hybrid Information Technology</title>
		<imprint>
			<biblScope unit="volume">8</biblScope>
			<biblScope unit="page" from="221" to="228" />
			<date type="published" when="2015">2015</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">Application of the artificial neural networks theory in problems of applied pedagogy of higher education institutions</title>
		<author>
			<persName><forename type="first">H</forename><surname>Chaban</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Kukhtiak</surname></persName>
		</author>
		<idno type="DOI">10.32405/2411-1317-2020-1-51-56</idno>
	</analytic>
	<monogr>
		<title level="j">Ukrainian Educational Journal</title>
		<imprint>
			<biblScope unit="page" from="51" to="56" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">Students&apos; performance prediction using data of multiple courses by recurrent neural network</title>
		<author>
			<persName><forename type="first">F</forename><surname>Okubo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Yamashita</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Shimada</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Konomi</surname></persName>
		</author>
		<ptr target="https://www.apsce.net/icce/icce2017/140.115.135.84/icce/icce2017/sites/default/files/proceedings/main/C3/Students%20Performance%20Prediction%20Using%20Data%20of%20Multiple%20Courses%20by%20Recurrent%20Neural%20Network.pdf" />
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 25th International Conference on Computers in Education, ICCE 2017 - Main Conference Proceedings, Asia-Pacific Society for Computers in Education</title>
				<editor>
			<persName><forename type="first">A</forename><forename type="middle">F</forename><surname>Mohd Ayub</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Mitrovic</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J.-C</forename><surname>Yang</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">S</forename><forename type="middle">L</forename><surname>Wong</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">W</forename><surname>Chen</surname></persName>
		</editor>
		<meeting>the 25th International Conference on Computers in Education, ICCE 2017 - Main Conference Proceedings, Asia-Pacific Society for Computers in Education</meeting>
		<imprint>
			<date type="published" when="2017">2017</date>
			<biblScope unit="page" from="439" to="444" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">An innovative evaluation method for undergraduate education: an approach based on bp neural network and stress testing</title>
		<author>
			<persName><forename type="first">C</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Feng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Yuling</surname></persName>
		</author>
		<idno type="DOI">10.1080/03075079.2020.1739013</idno>
	</analytic>
	<monogr>
		<title level="j">Studies in Higher Education</title>
		<imprint>
			<biblScope unit="volume">47</biblScope>
			<biblScope unit="page" from="212" to="228" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">The technique of the use of Virtual Learning Environment in the process of organizing the future teachers&apos; terminological work by specialty</title>
		<author>
			<persName><forename type="first">V</forename><forename type="middle">V</forename><surname>Pererva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><forename type="middle">O</forename><surname>Lavrentieva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><forename type="middle">I</forename><surname>Lakomova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><forename type="middle">S</forename><surname>Zavalniuk</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">T</forename><surname>Tolmachev</surname></persName>
		</author>
		<idno type="DOI">10.55056/cte.363</idno>
	</analytic>
	<monogr>
		<title level="j">CTE Workshop Proceedings</title>
		<imprint>
			<biblScope unit="volume">7</biblScope>
			<biblScope unit="page" from="321" to="346" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<analytic>
		<title level="a" type="main">Artificial neural network analysis of the academic performance of students in virtual learning environments</title>
		<author>
			<persName><forename type="first">A</forename><surname>Rivas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>González-Briones</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Hernández</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Prieto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Chamoso</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.neucom.2020.02.125</idno>
	</analytic>
	<monogr>
		<title level="j">Neurocomputing</title>
		<imprint>
			<biblScope unit="volume">423</biblScope>
			<biblScope unit="page" from="713" to="720" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<analytic>
		<title level="a" type="main">Exploring the Interplay of Moodle Tools and Student Learning Outcomes: A Composite-Based Structural Equation Modelling Approach</title>
		<author>
			<persName><forename type="first">L</forename><surname>Fadieieva</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Semerikov</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-031-71804-5_28</idno>
	</analytic>
	<monogr>
		<title level="m">Information Technology for Education, Science, and Technics</title>
		<title level="s">Lecture Notes on Data Engineering and Communications Technologies</title>
		<editor>
			<persName><forename type="first">E</forename><surname>Faure</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Y</forename><surname>Tryus</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><surname>Vartiainen</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">O</forename><surname>Danchenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Bondarenko</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">C</forename><surname>Bazilo</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">G</forename><surname>Zaspa</surname></persName>
		</editor>
		<meeting><address><addrLine>Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer Nature Switzerland</publisher>
			<date type="published" when="2024">2024</date>
			<biblScope unit="volume">222</biblScope>
			<biblScope unit="page" from="418" to="435" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">Predicting academic performance of students from VLE big data using deep learning models</title>
		<author>
			<persName><forename type="first">H</forename><surname>Waheed</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S.-U</forename><surname>Hassan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">R</forename><surname>Aljohani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Hardman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Alelyani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Nawaz</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.chb.2019.106189</idno>
	</analytic>
	<monogr>
		<title level="j">Computers in Human Behavior</title>
		<imprint>
			<biblScope unit="volume">104</biblScope>
			<biblScope unit="page">106189</biblScope>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<analytic>
		<title level="a" type="main">The Potential for the Use of Deep Neural Networks in e-Learning Student Evaluation with New Data Augmentation Method</title>
		<author>
			<persName><forename type="first">A</forename><surname>Cader</surname></persName>
		</author>
		<idno type="DOI">10.1007/978-3-030-52240-7_7</idno>
	</analytic>
	<monogr>
		<title level="m">Artificial Intelligence in Education</title>
				<editor>
			<persName><forename type="first">I</forename><forename type="middle">I</forename><surname>Bittencourt</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Cukurova</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">K</forename><surname>Muldner</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">R</forename><surname>Luckin</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">E</forename><surname>Millán</surname></persName>
		</editor>
		<meeting><address><addrLine>Cham</addrLine></address></meeting>
		<imprint>
			<publisher>Springer International Publishing</publisher>
			<date type="published" when="2020">2020</date>
			<biblScope unit="page" from="37" to="42" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">A neuro-fuzzy approach in the classification of students&apos; academic performance</title>
		<author>
			<persName><forename type="first">Q</forename><forename type="middle">H</forename><surname>Do</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J.-F</forename><surname>Chen</surname></persName>
		</author>
		<idno type="DOI">10.1155/2013/179097</idno>
	</analytic>
	<monogr>
		<title level="j">Computational intelligence and neuroscience</title>
		<imprint>
			<biblScope unit="volume">2013</biblScope>
			<biblScope unit="page">179097</biblScope>
			<date type="published" when="2013">2013</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b37">
	<analytic>
		<title level="a" type="main">User/tutor optimal learning path in e-learning using comprehensive neuro-fuzzy approach</title>
		<author>
			<persName><forename type="first">H</forename><surname>Fazlollahtabar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Mahdavi</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.edurev.2009.02.001</idno>
	</analytic>
	<monogr>
		<title level="j">Educational Research Review</title>
		<imprint>
			<biblScope unit="volume">4</biblScope>
			<biblScope unit="page" from="142" to="155" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<analytic>
		<title level="a" type="main">An adaptive neuro-fuzzy model for prediction of student&apos;s academic performance</title>
		<author>
			<persName><forename type="first">O</forename><surname>Taylan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Karagözoğlu</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.cie.2009.01.019</idno>
	</analytic>
	<monogr>
		<title level="j">Computers &amp; Industrial Engineering</title>
		<imprint>
			<biblScope unit="volume">57</biblScope>
			<biblScope unit="page" from="732" to="741" />
			<date type="published" when="2009">2009</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b39">
	<analytic>
		<title level="a" type="main">Professional training of experts in the field of education in Slavic Eastern European countries</title>
		<author>
			<persName><forename type="first">I</forename><surname>Tryhub</surname></persName>
		</author>
		<ptr target="https://tinyurl.com/4vb4ysub" />
	</analytic>
	<monogr>
		<title level="j">Pedagogical process: theory and practice</title>
		<imprint>
			<biblScope unit="volume">4</biblScope>
			<biblScope unit="page" from="78" to="78" />
			<date type="published" when="2016">2016</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b40">
	<monogr>
		<author>
			<persName><forename type="first">Verkhovna</forename><surname>Rada Of Ukraine</surname></persName>
		</author>
		<ptr target="https://zakon.rada.gov.ua/laws/show/z0880-19#Text" />
		<title level="m">Regulations on the accreditation of educational programs, which provide training for higher education</title>
				<imprint>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
