<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">Ball and Player Detection in Futsal Videos Using YOLOv8 Model</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Shohruh</forename><surname>Begmatov</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">IVUS2024: Information Society and University Studies</orgName>
								<address>
									<addrLine>2024, May 17</addrLine>
									<settlement>Kaunas</settlement>
									<country key="LT">Lithuania</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mukhriddin</forename><surname>Arabboev</surname></persName>
							<email>mukhriddin.9207@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Mokhirjon</forename><surname>Rikhsivoev</surname></persName>
							<email>mrikhsivoev@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Saidakmal</forename><surname>Saydiakbarov</surname></persName>
							<email>saidakmalflash@gmail.com</email>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Zukhriddin</forename><surname>Khamidjonov</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Sardor</forename><surname>Vakhkhobov</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Khurshid</forename><surname>Aliyarov</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Oybek</forename><surname>Karimov</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Tashkent University of Information Technologies named after Muhammad al-Khwarizmi</orgName>
								<address>
									<addrLine>108 Amir Temur St</addrLine>
									<postCode>100084</postCode>
									<settlement>Tashkent</settlement>
									<country key="UZ">Uzbekistan</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">Ball and Player Detection in Futsal Videos Using YOLOv8 Model</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">C8BDD5EB506CA48C2CABD0D2A74A74B6</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T16:28+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>YOLOv8</term>
					<term>Roboflow</term>
					<term>ball detection</term>
					<term>player detection</term>
					<term>futsal</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>There has been a significant increase in people's interest and enthusiasm for sports in recent years. This has resulted in an increased emphasis on high-quality video recording of various sports to capture even the smallest details. Recording and analysis have become extremely crucial in sports such as futsal, which involve several complex and fast events. Ball detection and tracking, along with player analysis, have emerged as areas of interest among many analysts and researchers. Coaches rely on video analysis to assess their team's performance and make informed decisions to achieve better results. Furthermore, coaches and sports scouts can use this tool to scout for talented players by reviewing their past games. Ball detection is vital in aiding referees to make correct decisions during critical moments of a game. However, due to the continuous movement of the ball, its shape and appearance change over time, and it often gets blocked by players, making it challenging to track its position throughout the game. This paper proposes a deep learning-based YOLOv8 model for detecting balls and players in broadcast futsal videos.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Over the last decade, numerous studies have been conducted worldwide on the development of computer vision and artificial intelligence technologies in sports. In <ref type="bibr" target="#b0">[1]</ref>, a detailed overview of sports video analysis is presented, covering various applications. These include high-level analyses such as player detection and classification, player or ball tracking, prediction of player or ball trajectories, recognition of team strategies, and classification of various events in sports. In <ref type="bibr" target="#b1">[2]</ref>, the focus is on using artificial intelligence techniques in athlete monitoring applications, including machine learning, deep learning, and natural language processing usage cases. In <ref type="bibr" target="#b2">[3]</ref>, the task of detecting the players and sports balls in real-world handball images is considered, as a building block for action recognition. In <ref type="bibr" target="#b3">[4]</ref>, a deep learning-based player tracking system is proposed to automatically track players and index their participation per play in American football games. In <ref type="bibr" target="#b4">[5]</ref>, the focus is on image and video content analysis of handball scenes and applying deep learning methods for detecting and tracking the players and recognizing their activities. In the study, for the task of player and ball detection, the YOLOv7 model is used. In <ref type="bibr" target="#b5">[6]</ref>, object tracking techniques are investigated for the paralympic team sport named goalball. In the study, different tracking methods have been implemented and compared, evaluating prediction accuracy and performance speed in tracking the players and the ball. 
In <ref type="bibr" target="#b6">[7]</ref>, a machine learning-based analysis of badminton videos is proposed, utilizing two deep learning models, TrackNet and YOLOv5, to predict shuttlecock trajectories, track players, and detect different shot types. The study involved the following steps: First, custom-collected datasets from both smartphone-recorded videos and online YouTube videos of badminton matches were labelled. These labelled datasets were then processed and used to train the machine-learning models. Finally, to evaluate the performance of the TrackNet and YOLOv5 models, a separate testing dataset was used. In <ref type="bibr" target="#b7">[8]</ref>, the application of deep learning methods in sports scenes to detect and track athletes and recognize their activities is presented. In the study, the scenes recorded during handball games and training activities are used as an example. Another interesting study is found in <ref type="bibr" target="#b8">[9]</ref>. The study is devoted to basketball action recognition based on the combination of YOLO and a deep fuzzy LSTM network. In the study, the proposed model was validated on SpaceJam and Basketball-51 datasets. In <ref type="bibr" target="#b9">[10]</ref>, the YOLOv7 and YOLOv7_tiny models are presented for soccer-ball multi-detection with DeepSORT for tracking by a semi-supervised system. In <ref type="bibr" target="#b10">[11]</ref>, the potential of artificial intelligence in football is explored. In <ref type="bibr" target="#b11">[12]</ref>, football player performance analysis using particle swarm optimization and player value calculation using regression is presented. In <ref type="bibr" target="#b12">[13]</ref>, a novel machine learning approach is developed to predict the likelihood of a team attempting to score during a segment of the match. 
In <ref type="bibr" target="#b13">[14]</ref>, a model of automated detection and classification of soccer field objects using YOLOv7 and computer vision techniques is proposed. In <ref type="bibr" target="#b14">[15]</ref>, an efficient deep convolutional neural network-based method is proposed to automatically detect football players from video matches directly. In <ref type="bibr" target="#b15">[16]</ref>, deep learning-based automated sports video summarization using YOLO is proposed. In the study, a database consisting of 1300 images was used to train (using transfer learning) a supervised-learning-based object detection algorithm. Deep learning has emerged as a key area for sports analysis, particularly in the field of multi-object detection. A study <ref type="bibr" target="#b16">[17]</ref> has shown that YOLOv7 models can be adapted for the multi-detection of soccer balls, demonstrating their effectiveness for ball tracking. This finding suggests that YOLOv8 has the potential to achieve similar success in identifying both balls and players in futsal videos.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.1.">The Rise of Futsal and the Need for Advanced Video Analysis</head><p>Futsal, a fast-paced and skillful variant of soccer played on a smaller court, has witnessed a remarkable surge in popularity worldwide in recent years <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b17">18]</ref>. This growth has led to a growing demand for advanced video analysis tools that are specifically designed for futsal videos. Unlike traditional analysis methods used in other sports, video analysis tailored for futsal offers unique advantages for various stakeholders:</p><p>Improved Coaching Strategies: By analyzing game footage, coaches can gain deeper insights into their team's performance. This includes understanding team formations, player positioning, and passing patterns. By identifying strengths and weaknesses, coaches can develop more effective tactics and training strategies to optimize team performance <ref type="bibr" target="#b18">[19]</ref>.</p><p>Objective Performance Evaluation: Coaches can objectively assess individual and team performance by analyzing player movements and interactions with the ball through video analysis. This allows for targeted feedback for players, highlighting areas for improvement and tracking their skill development over time.</p><p>Targeted Training Drills: Video analysis can be a powerful tool for designing targeted training drills. By identifying specific weaknesses in-game footage, coaches can create drills that address those areas, leading to more efficient skill development for individual players and the entire team.</p><p>Future of Officiating: While still under development, real-time video analysis has the potential to assist referees in making close calls during matches. 
This could lead to improved officiating accuracy and fairer outcomes in the future <ref type="bibr" target="#b18">[19]</ref>.</p><p>These benefits highlight the potential of video analysis specifically designed for futsal videos. By leveraging this technology, coaches, players, and referees can gain valuable insights that traditional methods cannot offer, ultimately leading to a more strategic, data-driven approach to the sport.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.2.">Unique Challenges of Futsal Videos for Ball and Player Detection</head><p>It is important to note that traditional computer vision techniques used in other sports video analysis may not be sufficient for futsal due to the unique characteristics of the game. These characteristics present distinct challenges for object detection and tracking algorithms. The challenges include:</p><p>Increased Player Density: The smaller court size in futsal leads to frequent player-ball occlusions, where players obstruct the view of the ball. This demands object detection models that can effectively identify and differentiate between players and the ball, even when partially hidden behind each other <ref type="bibr" target="#b18">[19]</ref>.</p><p>Rapid Ball Movement: Futsal is played at a faster pace compared to soccer. The ball experiences swift and unpredictable changes in direction and speed, requiring object-tracking algorithms that can accurately follow the ball's trajectory despite these dynamic movements.</p><p>Emphasis on Ball Control: Unlike soccer, where the ball often spends significant time in the air, futsal emphasizes close control and passing. By analyzing ball movement patterns in futsal videos, valuable insights can be gleaned into player performance and game tactics, such as dribbling skills and passing accuracy <ref type="bibr" target="#b19">[20]</ref>.</p><p>In the following sections we will delve deeper into these challenges, explore existing video analysis methods, and introduce our proposed approach utilizing YOLOv8, a deep learning model well-suited for addressing the unique demands of futsal video analysis.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.3.">YOLOv8: Addressing the Challenges of Futsal Video Analysis</head><p>As per our earlier discussion, traditional methods of analyzing futsal videos struggle to overcome the unique challenges presented by this sport. However, YOLOv8, a state-of-the-art deep learning model, has shown promising results in tackling these challenges. YOLOv8 has specific strengths that make it well-suited for futsal video analysis, such as real-time processing, robust object detection, and bounding boxes for detailed analysis. Real-time processing allows for near-instantaneous analysis of game footage, providing coaches with immediate feedback and potentially enabling them to make tactical adjustments during matches <ref type="bibr" target="#b20">[21]</ref>. Additionally, real-time analysis could assist referees in making close calls, leading to fairer outcomes. YOLOv8's advanced convolutional neural network architecture helps it handle frequent player and ball occlusions commonly encountered in futsal videos. This robust object detection capability is crucial for accurately identifying and differentiating between players and the ball, even when partially hidden behind each other <ref type="bibr" target="#b20">[21]</ref>. YOLOv8 outputs bounding boxes around detected objects, players, and the ball. These bounding boxes help track the ball's position and movement throughout the game, allowing for detailed analysis of player control and passing accuracy techniques. Coaches can assess a player's ability to maintain close control during dribbling or tight spaces by analyzing the size and movement of the bounding boxes around the ball. Similarly, coaches can evaluate passing accuracy and effectiveness by tracking the trajectory of the ball and its relationship to the receiving player's bounding box. 
Leveraging these strengths of YOLOv8, our research aims to develop a robust ball and player detection system specifically tailored for the unique challenges of futsal video analysis. This system can potentially contribute to advancements in various aspects of the sport for different stakeholders. There are Player Training, Tactical Analysis, and Officiating Support.</p><p>Player training coaches can design targeted training drills using insights gained from analyzing player movement and ball control with YOLOv8. With YOLOv8, players can receive personalized feedback on their performance by analyzing their movement patterns and interactions with the ball. This allows players to identify areas for improvement and focus their training efforts more effectively. In tactical analysis, coaches can develop more effective tactics by analyzing team formations, player positioning, and passing patterns revealed by YOLOv8. YOLOv8 can also be used to analyze opponent tactics by studying their formations and player movements through video analysis. This allows coaches to develop counter-strategies and gain a competitive edge. In officiating support, real-time video analysis using YOLOv8 could potentially assist referees in making close calls, leading to improved officiating accuracy. For example, YOLOv8 could be used to analyze close calls involving potential fouls or out-of-bounds situations, providing referees with additional information to make informed decisions. In the field of futsal video analysis, using YOLOv8 technology provides a significant advantage over traditional methods. This technology has the potential to revolutionize the way coaches, players, and referees approach the game by enabling a more data-driven and strategic approach. As deep learning models like YOLOv8 continue to evolve, the possibilities for enhancing futsal through video analysis are truly exciting. 
This study aims to develop a model for player and ball detection in futsal videos using YOLOv8, specifically in the segmentation of players based on different body parts such as the knee, neck, and elbow.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.4.">Segmentation Methods for Futsal Video Analysis</head><p>Analyzing futsal videos can be challenging due to factors such as the high density of players, rapid ball movement, and the emphasis on close ball control. To overcome these challenges, segmentation methods play a crucial role in separating players, the ball, and other relevant objects from the background. In this context, we will explore various segmentation approaches suitable for futsal video analysis, including Deep Learning-based and Traditional Segmentation methods. Real-time processing for immediate results. Balanced accuracy and speed for initial player/ball detection <ref type="bibr" target="#b21">[22]</ref>.</p><p>Struggles with occlusions (partially hidden players/ball) <ref type="bibr" target="#b22">[23]</ref>. Misled by background clutter (spectators, advertisements) <ref type="bibr" target="#b23">[24]</ref>.</p><p>Suitable for initial detection due to real-time processing.</p><p>May require additional techniques for occlusion handling (e.g., pose estimation) <ref type="bibr" target="#b21">[22,</ref><ref type="bibr" target="#b22">23,</ref><ref type="bibr" target="#b23">24]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Semantic Segmentation</head><p>Precise identification of various regions (court, players, ball, background) <ref type="bibr" target="#b21">[22]</ref>. Improved potential for occlusion handling by analyzing surrounding pixels <ref type="bibr" target="#b22">[23]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Generally slower processing compared to</head><p>YOLOv8.</p><p>Well-suited for detailed scene understanding and potentially better occlusion handling. Consider the trade-off between processing speed and accuracy requirements <ref type="bibr" target="#b21">[22,</ref><ref type="bibr" target="#b22">23]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Instance Segmentation</head><p>Builds upon semantic segmentation to assign unique labels to individual objects (e.g., tracking specific players) <ref type="bibr" target="#b23">[24]</ref>.</p><p>Computationally expensive compared to other methods.</p><p>Valuable for tracking specific player movements and interactions <ref type="bibr" target="#b23">[24]</ref>.</p><p>Thresholding A simple and fast method for highcontrast scenarios.</p><p>Limited effectiveness in complex lighting or overlapping objects.</p><p>Potentially useful for basic segmentation in controlled environments <ref type="bibr" target="#b24">[25]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Clustering</head><p>Groups pixels based on shared features (color, intensity).</p><p>Requires careful parameter tuning and may struggle with complex scenes.</p><p>Might be exploratory for segmenting regions with similar properties <ref type="bibr" target="#b25">[26]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Morphological Operations</head><p>Useful for noise reduction and object boundary refinement.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Often used in conjunction with other segmentation methods.</head><p>Can be integrated with other methods to enhance segmentation results <ref type="bibr" target="#b26">[27]</ref>.</p><p>Table <ref type="table" target="#tab_0">1</ref> provides a comprehensive overview of traditional and deep-learning-based segmentation methods, including thresholding, clustering, and morphological operations.</p><p>Section 2 provides an overview of existing research, while Section 3 proposes an artificial intelligence model for whole-body segmentation of players in futsal videos. Section 4 proposes an artificial intelligence model for the segmentation of players in futsal videos by different body parts. Finally, Section 5 concludes the paper.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Related Work</head><p>In this section, an overview of existing research on ball and player detection in futsal videos using AI models is given. In recent years, various studies have been conducted on the development of ball and player detection, and game analysis in futsal due to recent advancements in science. In <ref type="bibr" target="#b27">[28]</ref>, a multiple-camera methodology for automatic localization and tracking of futsal players is proposed. The study presents an automated method for estimating the positions of futsal players as probability distributions through the use of multiple cameras and particle filters, thereby reducing the need for human intervention. In their framework, each player position is defined as a non-parametric distribution, which is tracked using particle filters. The authors used information from multiple cameras to create an observation model, which is a probability distribution function that describes the likely positions of players in the court plane, at each frame. To reduce human intervention, it addresses player confusion during tracking by using an appearance model to update the observation function. The experiments carried out revealed tracking errors below 70 cm, demonstrating the potential for aiding sports teams in various technical areas.</p><p>In <ref type="bibr" target="#b28">[29]</ref>, the use of computer vision techniques for visually tracking futsal players is explored. The study utilizes adaptive background subtraction and blob analysis to detect players, along with particle filters to predict their positions and track them using data from a single stationary camera. Based on the results of their experiments, it has been shown that the proposed method is capable of accurately tracking players and calculating their movements during futsal matches. 
Their approach has been found to have an error rate of less than 20 cm, which demonstrates its high potential for use in a variety of futsal match analyses.</p><p>In <ref type="bibr" target="#b29">[30]</ref>, a vision-based system was introduced to aid in the tactical and physical analysis of futsal teams. This system is a simple, yet efficient solution that uses image sequences captured by a single stationary camera to obtain top-view images of the entire court. This enables a comprehensive analysis of the game and player performance. The results of experiments conducted with image sequences of an official match and a training match show that the proposed system provides accurate tracking data with global mean tracking errors below 40 cm. The system takes only 25 ms to process each frame, which demonstrates its high potential for practical application.</p><p>In <ref type="bibr" target="#b30">[31]</ref>, the applicability and reliability of using a single wide-angle lens GoPro camera for tracking and kinematics analysis of futsal players were assessed. Four digital video cameras were used to record an official game of a Brazilian professional team during the quarter-final round of the 2013 São Paulo futsal league. The cameras were placed at the highest points of the court (40 x 20 m; FIFA standard) and recorded at 30 Hz with a resolution of 720 x 480.</p><p>Finally, a method for analyzing futsal matches using computer vision was proposed in <ref type="bibr" target="#b31">[32]</ref>. Videos were recorded using a single camera with a wide-angle lens, which facilitated the installation and calibration process in different matches and arenas. This approach is demonstrated using video recordings of the Pato Futsal team. The recordings were used to identify the players, project their positions from pixels to real-world coordinates, and estimate their trajectories. 
The resulting data visualization is intended to assist coaches in their physical and tactical analysis.</p><p>To summarize, the previous contributions on futsal player detection mentioned above are based on camera-related approaches. However, none of them is based on the YOLOv8-based model presented in this work. In this study, a model for ball and player detection in futsal videos using the YOLOv8 algorithm and the Roboflow platform is developed.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Development of an artificial intelligence model for whole-body segmentation of players in futsal videos</head><p>This section is devoted to the creation of an artificial intelligence model and dataset <ref type="bibr" target="#b32">[33]</ref> that can do whole-body segmentation of players in futsal videos. The Roboflow platform was used to create a dataset of whole-body segmentation of players in futsal videos. Roboflow is one of the most popular platforms that provides tools for managing and deploying computer vision models. The YOLOv8 algorithm was used to develop an artificial intelligence model for whole-body segmentation of players in futsal videos. YOLOv8 is an advanced computer vision model created by Ultralytics, representing the most up-to-date technology in this field. YOLOv8 is suitable for a wide range of object detection and tracking, instance segmentation, image classification, and pose estimation tasks. Figure <ref type="figure" target="#fig_0">1</ref> shows a graphical representation of the whole-body segmentation of players in futsal videos.  Input frame 640x360</p><p>It can be seen from Table <ref type="table" target="#tab_1">2</ref> that the dataset developed for use in the artificial intelligence model for whole-body segmentation of players in futsal videos consists of a total of 391 images, of which 342 are the train set and 33 are the validation set and 16 are for the test set. Each input image has a size of 640x360. The artificial intelligence model for whole-body segmentation of players in futsal videos consists of 4 classes. These are: ball, player1, player2, referee.  </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Development of an artificial intelligence model for segmentation of players in futsal videos by knee, neck, and elbow parts of the body</head><p>This section is devoted to the development of an artificial intelligence model and dataset that can detect players in futsal videos by knee, neck, and elbow parts of the body. The Roboflow platform was used to create a dataset of segmentation of players in futsal videos by knee, neck, and elbow parts of the body. The YOLOv8 Large algorithm was used to develop an artificial intelligence model for segmentation of players in futsal videos by knee, neck, and elbow parts of the body. YOLOv8 Large is indeed the largest pre-trained model available in the YOLOv8 family. Figure <ref type="figure" target="#fig_3">6</ref> shows a graphical representation of the segmentation of players in futsal videos by knee, neck, and elbow parts of the body.  Input frame 1280x1280</p><p>It can be seen from Table <ref type="table" target="#tab_2">3</ref> that the dataset developed for use in artificial intelligence model for segmentation of players in futsal videos by knee, neck, and elbow parts of the body consists of a total of 388 images, of which 271 are train set, 78 of them were allocated to the validation set and 39 to the test set. Each input image has a resolution of 1280x1280. The artificial intelligence model for segmentation of players in futsal videos by knee, neck, and elbow parts of the body consists of 2 classes. These are: ball, player.   </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">Conclusion</head><p>In conclusion, our study on ball and player detection in futsal videos using the YOLOv8 model has made significant progress in enhancing the capabilities of sports video analytics. The successful implementation and fine-tuning of YOLOv8 for the nuanced dynamics of futsal have demonstrated its effectiveness in real-time and accurate detection of both the ball and players, even in challenging scenarios such as rapid player movements and occlusions.</p><p>In our future work, we plan to create a larger dataset for ball and player detection in futsal videos. Furthermore, we will conduct new research on improving the accuracy of the model developed in this study.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Whole-body detection and redundant information in the detected area</figDesc><graphic coords="6,137.70,160.75,152.85,181.10" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :Figure 3 :Figure 4 :</head><label>234</label><figDesc>Figure 2: Achieved results on player detection (differentiates each team separately), ball detection, and referee detection</figDesc><graphic coords="6,181.00,582.40,247.10,138.85" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: Results obtained based on the training of the proposed model</figDesc><graphic coords="7,122.70,416.05,350.15,165.35" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 6 :</head><label>6</label><figDesc>Figure 6: Detection along the knee, neck, elbow</figDesc><graphic coords="8,148.50,71.95,156.65,167.75" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 7 :</head><label>7</label><figDesc>Figure 7: Results obtained based on the training of the proposed model</figDesc><graphic coords="8,107.45,503.40,394.40,196.30" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 8 :</head><label>8</label><figDesc>Figure 8: Confusion Matrix based on the proposed model</figDesc><graphic coords="9,130.55,71.95,314.20,235.65" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head>Figure 9 :</head><label>9</label><figDesc>Figure 9: The results obtained using the proposed model, which detects players in futsal videos by knee, neck, and elbow parts of the body</figDesc><graphic coords="9,130.60,332.30,356.05,188.85" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>Table 1</head><label>1</label><figDesc>Comparison of Segmentation Methods for Futsal Video Analysis</figDesc><table><row><cell>Method</cell><cell>Advantages</cell><cell>Limitations</cell><cell>Considerations for Futsal Analysis</cell></row><row><cell>YOLOv8 (Object</cell><cell></cell><cell></cell><cell></cell></row><row><cell>Detection)</cell><cell></cell><cell></cell><cell></cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head>Table 2 A</head><label>2</label><figDesc>dataset developed for use in an artificial intelligence model for whole-body segmentation of players in futsal videos</figDesc><table><row><cell>№</cell><cell>Name</cell><cell>Quantity/size</cell></row><row><cell>1</cell><cell>Train set</cell><cell>342</cell></row><row><cell>2</cell><cell>Validation set</cell><cell>33</cell></row><row><cell>3</cell><cell>Test set</cell><cell>16</cell></row><row><cell>4</cell><cell></cell><cell></cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head>Table 3</head><label>3</label><figDesc>A dataset developed for use in an artificial intelligence model for segmentation of players in futsal videos by knee, neck, and elbow parts of the body</figDesc><table><row><cell>№</cell><cell>Name</cell><cell>Quantity/size</cell></row><row><cell>1</cell><cell>Train set</cell><cell>271</cell></row><row><cell>2</cell><cell>Valid set</cell><cell>78</cell></row><row><cell>3</cell><cell>Test set</cell><cell>39</cell></row><row><cell>4</cell><cell></cell><cell></cell></row></table></figure>
		</body>
		<back>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">A Comprehensive Review of Computer Vision in Sports: Open Issues</title>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">T</forename><surname>Naik</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">F</forename><surname>Hashmi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">D</forename><surname>Bokde</surname></persName>
		</author>
		<idno type="DOI">10.3390/app12094429</idno>
	</analytic>
	<monogr>
		<title level="j">Future Trends and Research Directions</title>
		<imprint>
			<biblScope unit="volume">12</biblScope>
			<biblScope unit="issue">9</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
	<note>Appl. Sci.</note>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">Comparative analysis of AI methods for athletes training</title>
		<author>
			<persName><forename type="first">M</forename><surname>Rikhsivoev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Arabboev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Begmatov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Saydiakbarov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Aliyarov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Nosirov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Khamidjonov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Vakhkhobov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Bull. TUIT Manag. Commun. Technol</title>
		<imprint>
			<biblScope unit="volume">4</biblScope>
			<biblScope unit="issue">21</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Adapting YOLO Network for Ball and Player Detection</title>
		<author>
			<persName><forename type="first">M</forename><surname>Burić</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pobar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ivašić-Kos</surname></persName>
		</author>
		<idno type="DOI">10.5220/0007582008450851</idno>
	</analytic>
	<monogr>
		<title level="j">Int. Conf. Pattern Recognit. Appl. Methods</title>
		<imprint>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page" from="845" to="851" />
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Automated player identification and indexing using two-stage deep learning network</title>
		<author>
			<persName><forename type="first">H</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Adreon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Wagnon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">L</forename><surname>Bamba</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Maccall</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Yu</forename><surname>Gan</surname></persName>
		</author>
		<idno type="DOI">10.1038/s41598-023-36657-5</idno>
	</analytic>
	<monogr>
		<title level="j">Sci. Rep</title>
		<imprint>
			<biblScope unit="volume">13</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="1" to="11" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Analysis of Movement and Activities of Handball Players Using Deep Neural Networks</title>
		<author>
			<persName><forename type="first">K</forename><surname>Host</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pobar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ivasic-Kos</surname></persName>
		</author>
		<idno type="DOI">10.3390/jimaging9040080</idno>
	</analytic>
	<monogr>
		<title level="j">J. Imaging</title>
		<imprint>
			<biblScope unit="volume">9</biblScope>
			<biblScope unit="issue">4</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Matusevicius, Multiple object tracking for video-based sports analysis</title>
		<author>
			<persName><forename type="first">J</forename><surname>Gudauskas</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ž</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">CEUR Workshop Proc</title>
				<imprint>
			<date type="published" when="2021">2021</date>
			<biblScope unit="volume">2915</biblScope>
			<biblScope unit="page" from="1" to="10" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<monogr>
		<title level="m" type="main">Towards Machine Learning Framework for Badminton Game Analysis Using TrackNet and YOLO Models</title>
		<author>
			<persName><forename type="first">A</forename><surname>Mohamed</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
		<respStmt>
			<orgName>Iowa State University</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Application of Deep Learning Methods for Detection and Tracking of Players</title>
		<author>
			<persName><forename type="first">M</forename><surname>Ivasic-Kos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Host</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pobar</surname></persName>
		</author>
		<idno type="DOI">10.5772/intechopen.96308</idno>
	</analytic>
	<monogr>
		<title level="m">Deep Learning Applications</title>
				<editor>
			<persName><forename type="first">P</forename></persName>
		</editor>
		<editor>
			<persName><forename type="first">Luigi</forename><surname>Mazzeo</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><surname>Spagnolo</surname></persName>
		</editor>
		<imprint>
			<publisher>IntechOpen</publisher>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Basketball action recognition based on the combination of YOLO and a deep fuzzy LSTM network</title>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">B</forename><surname>Khobdeh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">R</forename><surname>Yamaghani</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">K</forename><surname>Sareshkeh</surname></persName>
		</author>
		<idno type="DOI">10.1007/s11227-023-05611-7</idno>
	</analytic>
	<monogr>
		<title level="j">J. Supercomput</title>
		<imprint>
			<biblScope unit="volume">80</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="3528" to="3553" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Adaptation of YOLOv7 and YOLOv7_tiny for Soccer-Ball Multi-Detection with DeepSORT for Tracking by Semi-Supervised System</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Vicente-Martínez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Márquez-Olivera</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>García-Aliaga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Hernández-Herrera</surname></persName>
		</author>
		<idno type="DOI">10.3390/s23218693</idno>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="issue">21</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Artificial Intelligence in Performance Analysis of Football</title>
		<author>
			<persName><forename type="first">K</forename><surname>Aliyarov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Rikhsivoev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Arabboev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Begmatov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Saydiakbarov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Nosirov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Khamidjonov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Vakhkhobov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Bull. TUIT Manag. Commun. Technol</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="issue">19</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">Football Player Performance Analysis using Particle Swarm Optimization and Player Value Calculation using Regression</title>
		<author>
			<persName><forename type="first">A</forename><surname>Jana</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Hemalatha</surname></persName>
		</author>
		<idno type="DOI">10.1088/1742-6596/1911/1/012011</idno>
	</analytic>
	<monogr>
		<title level="j">J. Phys. Conf. Ser</title>
		<imprint>
			<biblScope unit="volume">1911</biblScope>
			<biblScope unit="issue">1</biblScope>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Machine Learning Enabled Team Performance Analysis in the Dynamical Environment of Soccer</title>
		<author>
			<persName><forename type="first">S</forename><surname>Kusmakar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Shelyag</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Dwyer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Gastin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Angelova</surname></persName>
		</author>
		<idno type="DOI">10.1109/ACCESS.2020.2992025</idno>
	</analytic>
	<monogr>
		<title level="j">IEEE Access</title>
		<imprint>
			<biblScope unit="volume">8</biblScope>
			<biblScope unit="page" from="90266" to="90279" />
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Automated Detection and Classification of Soccer Field Objects using YOLOv7 and Computer Vision Techniques</title>
		<author>
			<persName><forename type="first">J</forename><surname>Abukhait</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Alaqtash</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Aljaafreh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Othman</surname></persName>
		</author>
		<idno type="DOI">10.14569/IJACSA.2023.0141191</idno>
	</analytic>
	<monogr>
		<title level="j">Int. J. Adv. Comput. Sci. Appl</title>
		<imprint>
			<biblScope unit="volume">14</biblScope>
			<biblScope unit="issue">11</biblScope>
			<biblScope unit="page" from="894" to="902" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<analytic>
		<title level="a" type="main">Deep Learning-Based Football Player Detection in Videos</title>
		<author>
			<persName><forename type="first">T</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Li</surname></persName>
		</author>
		<idno type="DOI">10.1155/2022/3540642</idno>
	</analytic>
	<monogr>
		<title level="j">Comput. Intell. Neurosci</title>
		<imprint>
			<biblScope unit="volume">2022</biblScope>
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">Video Summarization for Multiple Sports Using Deep Learning</title>
		<author>
			<persName><forename type="first">C</forename><surname>Guntuboina</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Porwal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Jain</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Shingrakhia</surname></persName>
		</author>
		<idno type="DOI">10.5565/rev/elcvia.1286</idno>
	</analytic>
	<monogr>
		<title level="j">ELCVIA Electron. Lett. Comput. Vis. Image Anal</title>
		<imprint>
			<biblScope unit="volume">20</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="99" to="116" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Adaptation of YOLOv7 and YOLOv7_tiny for Soccer-Ball Multi-Detection with DeepSORT for Tracking by Semi-Supervised System</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Vicente-Martínez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Márquez-Olivera</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>García-Aliaga</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Hernández-Herrera</surname></persName>
		</author>
		<idno type="DOI">10.3390/s23218693</idno>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="issue">21</biblScope>
			<biblScope unit="page">8693</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<analytic>
		<title level="a" type="main">A Literature Review on Performance Analysis in Futsal Using Information and Communication Technologies</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">C</forename><surname>Duarte</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Carmona-Lobo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">C</forename><surname>Burgos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Iglesias-Soler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Bravo</surname></persName>
		</author>
		<idno type="DOI">10.3390/s19071615</idno>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">19</biblScope>
			<biblScope unit="issue">7</biblScope>
			<biblScope unit="page">1615</biblScope>
			<date type="published" when="2019">2019</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">A Review of Automatic Video Analysis Techniques for Sport Applications</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kohler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Rios</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Toledo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Guevara</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.patrec.2020.12.021</idno>
	</analytic>
	<monogr>
		<title level="j">Pattern Recognition Letters</title>
		<imprint>
			<biblScope unit="volume">140</biblScope>
			<biblScope unit="page" from="145" to="158" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Futsal Performance Analysis through Player and Ball Tracking using Deep Learning</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">J</forename><surname>Fernandes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">S</forename><surname>Santos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">L</forename><surname>Teixeira</surname></persName>
		</author>
		<idno type="DOI">10.3390/s23042224</idno>
	</analytic>
	<monogr>
		<title level="j">Sensors</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page">2224</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<monogr>
		<author>
			<persName><forename type="first">A</forename><surname>Bochkovskiy</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C.-Y</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H.-Y</forename><forename type="middle">M</forename><surname>Liao</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2007.11883</idno>
		<title level="m">YOLOv8: Detecting Objects in Real-Time</title>
				<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b21">
	<monogr>
		<author>
			<persName><forename type="first">J</forename><surname>Redmon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Bochkovskiy</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2008.04110</idno>
		<title level="m">YOLOv8: Detecting objects in real time</title>
				<imprint>
			<date type="published" when="2020">2020</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Object detection in sports videos</title>
		<author>
			<persName><forename type="first">M</forename><surname>Buric</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Pobar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ivasic-Kos</surname></persName>
		</author>
		<idno type="DOI">10.23919/MIPRO.2018.8400189</idno>
	</analytic>
	<monogr>
		<title level="j">Int. Conv. Inf. Commun. Technol. Electron. Microelectron. MIPRO</title>
		<imprint>
			<biblScope unit="volume">2018</biblScope>
			<biblScope unit="page" from="1034" to="1039" />
			<date type="published" when="2028">2028</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<analytic>
		<title level="a" type="main">Deep Learning Image Analysis System on Embedded Platform</title>
		<author>
			<persName><forename type="first">H</forename><surname>Song</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">K</forename><surname>Choi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">S</forename><surname>Ko</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Yoo</surname></persName>
		</author>
		<idno type="DOI">10.1109/ICUFN57995.2023.10199407</idno>
	</analytic>
	<monogr>
		<title level="m">Int. Conf. Ubiquitous Futur. Networks, ICUFN</title>
				<imprint>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="911" to="913" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">A threshold selection method from gray-level histograms</title>
		<author>
			<persName><forename type="first">N</forename><surname>Otsu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE transactions on automatic control</title>
		<imprint>
			<biblScope unit="volume">24</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="62" to="66" />
			<date type="published" when="1979">1979</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">Data clustering: 50 years beyond K-means</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">K</forename><surname>Jain</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Pattern recognition letters</title>
		<imprint>
			<biblScope unit="volume">31</biblScope>
			<biblScope unit="issue">8</biblScope>
			<biblScope unit="page" from="651" to="666" />
			<date type="published" when="2010">2010</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<monogr>
		<title level="m" type="main">Morphological image analysis: principles and applications</title>
		<author>
			<persName><forename type="first">S</forename><surname>Soille</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2003">2003</date>
			<publisher>Springer Science &amp; Business Media</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">A multiple camera methodology for automatic localization and tracking of futsal players</title>
		<author>
			<persName><forename type="first">E</forename><surname>Morais</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Ferreira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">A</forename><surname>Cunha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">M L</forename><surname>Barros</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rocha</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Goldenstein</surname></persName>
		</author>
		<idno type="DOI">10.1016/j.patrec.2013.09.007</idno>
	</analytic>
	<monogr>
		<title level="j">Pattern Recognit. Lett</title>
		<imprint>
			<biblScope unit="volume">39</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="21" to="30" />
			<date type="published" when="2014">2014</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">Particle Filter-Based Predictive Tracking of Futsal Players from a Single Stationary Camera</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">H C</forename><surname>De Padua</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">L C</forename><surname>Padua</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">T D</forename><surname>Sousa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">D A</forename><surname>Pereira</surname></persName>
		</author>
		<idno type="DOI">10.1109/SIBGRAPI.2015.10</idno>
	</analytic>
	<monogr>
		<title level="m">Brazilian Symposium of Computer Graphic and Image Processing</title>
				<imprint>
			<date type="published" when="2015">2015</date>
			<biblScope unit="page" from="134" to="141" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">A vision-based system to support tactical and physical analyses in futsal</title>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">H C</forename><surname>De Pádua</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">L C</forename><surname>Pádua</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>De</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Pereira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">T D</forename><surname>Sousa</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">B</forename><surname>De Oliveira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">F</forename><surname>Wanner</surname></persName>
		</author>
		<idno type="DOI">10.1007/s00138-017-0849-z</idno>
	</analytic>
	<monogr>
		<title level="j">Mach. Vis. Appl</title>
		<imprint>
			<biblScope unit="volume">28</biblScope>
			<biblScope unit="issue">5-6</biblScope>
			<biblScope unit="page" from="475" to="496" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">Tracking futsal players with a wide-angle lens camera: accuracy analysis of the radial distortion correction based on an improved Hough transform algorithm</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">H P</forename><surname>Vieira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">A</forename><surname>Pagnoca</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Milioni</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">A</forename><surname>Barbieri</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">P</forename><surname>Menezes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Alvarez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">G</forename><surname>Déniz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Santana-Cedrés</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">R P</forename><surname>Santiago</surname></persName>
		</author>
		<idno type="DOI">10.1080/21681163.2015.1072055</idno>
	</analytic>
	<monogr>
		<title level="j">Comput. Methods Biomech. Biomed. Eng. Imaging Vis</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="221" to="231" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">Analysis of futsal matches using a single-camera computer vision system</title>
		<author>
			<persName><forename type="first">H</forename><surname>Paulichen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Zielinski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Casanova</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Cavalcanti</surname></persName>
		</author>
		<idno type="DOI">10.5753/wvc.2020.13494</idno>
	</analytic>
	<monogr>
		<title level="m">Anais do XVI Workshop de Visão Computacional</title>
				<imprint>
			<date type="published" when="2020">2020</date>
			<biblScope unit="page" from="134" to="139" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<monogr>
		<ptr target="https://app.roboflow.com/itmade/playerdetect/deploy/12" />
		<title level="m">ITMADE, Player detection</title>
				<imprint>
			<publisher>Roboflow</publisher>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
