<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">The influence of audiovisual elements on the realism of generative AI videos: the case of Sora</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Alberto</forename><surname>Sanchez-Acedo</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Audiovisual Communication and Advertising</orgName>
								<orgName type="institution">Rey Juan Carlos University</orgName>
								<address>
									<addrLine>Camino del Molino, 5</addrLine>
									<postCode>28942</postCode>
									<settlement>Fuenlabrada</settlement>
									<region>Madrid</region>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Alejandro</forename><surname>Carbonell-Alcocer</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Audiovisual Communication and Advertising</orgName>
								<orgName type="institution">Rey Juan Carlos University</orgName>
								<address>
									<addrLine>Camino del Molino, 5</addrLine>
									<postCode>28942</postCode>
									<settlement>Fuenlabrada</settlement>
									<region>Madrid</region>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Pasquale</forename><surname>Cascarano</surname></persName>
							<affiliation key="aff1">
								<orgName type="department">Department of the Arts</orgName>
								<orgName type="institution">University of Bologna</orgName>
								<address>
									<addrLine>Via Barberia 4</addrLine>
									<postCode>40123</postCode>
									<settlement>Bologna</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Shirin</forename><surname>Hajahmadi</surname></persName>
							<affiliation key="aff2">
								<orgName type="department">Department of Computer Science and Engineering</orgName>
								<orgName type="institution">University of Bologna</orgName>
								<address>
									<addrLine>Via Mura Anteo Zamboni 7</addrLine>
									<postCode>40126</postCode>
									<settlement>Bologna</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Giacomo</forename><surname>Vallasciani</surname></persName>
							<affiliation key="aff1">
								<orgName type="department">Department of the Arts</orgName>
								<orgName type="institution">University of Bologna</orgName>
								<address>
									<addrLine>Via Barberia 4</addrLine>
									<postCode>40123</postCode>
									<settlement>Bologna</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Manuel</forename><surname>Gertrudix</surname></persName>
							<affiliation key="aff0">
								<orgName type="department">Department of Audiovisual Communication and Advertising</orgName>
								<orgName type="institution">Rey Juan Carlos University</orgName>
								<address>
									<addrLine>Camino del Molino, 5</addrLine>
									<postCode>28942</postCode>
									<settlement>Fuenlabrada</settlement>
									<region>Madrid</region>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Gustavo</forename><surname>Marfia</surname></persName>
							<affiliation key="aff1">
								<orgName type="department">Department of the Arts</orgName>
								<orgName type="institution">University of Bologna</orgName>
								<address>
									<addrLine>Via Barberia 4</addrLine>
									<postCode>40123</postCode>
									<settlement>Bologna</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">The influence of audiovisual elements on the realism of generative AI videos: the case of Sora</title>
					</analytic>
					<monogr>
						<idno type="ISSN">1613-0073</idno>
					</monogr>
					<idno type="MD5">D9F85073DAC8F3840D2EDE82F8AB2098</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2025-04-23T18:59+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Artificial Intelligence</term>
					<term>Sora</term>
					<term>Videos generated with AI</term>
					<term>Text-to-video</term>
					<term>Audiovisual analysis</term>
					<term>Experiment</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>Generative Artificial Intelligence (Gen-AI) tools are in the spotlight in every professional field. In the last decade, artificial intelligence technologies that are capable of creating content in various formats such as texts, images, audios, or videos, have emerged. Among the well known tools are those developed by OpenAI, such as ChatGPT, DALL⋅E and Sora. They can generate text, images, and videos respectively, with the help of instructions given in the form of prompts, in an accessible and efficient way. This study aims to evaluate the attraction, composition and realism of Gen-AI videos in comparison to real videos. Therefore, a quasi-experimental design is conducted using a validated survey with two groups. The experimental group contains two videos produced by Sora as stimuli, while the control group contains two real videos. The results highlight key factors influencing perceived realism, such as natural lighting, saturation, color and perspective. However, the videos that Sora can generate have such a great degree of realism in terms of audiovisual composition that it will be necessary to educate people on the subject of content generation with artificial intelligence to prevent disinformation.</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1.">Introduction</head><p>Generative Artificial Intelligence (Gen-AI) is a specialised field of Artificial Intelligence (AI) that deals with the generation of human-like texts, the creation of images from written descriptions and the production of videos based on predefined instructions <ref type="bibr" target="#b0">[1]</ref>. Today, the potential of these Gen-AI tools is the subject of considerable debate on various key issues, ranging from the quality and authenticity of the content created to the ethical implications of their use <ref type="bibr" target="#b1">[2,</ref><ref type="bibr" target="#b2">3,</ref><ref type="bibr" target="#b3">4,</ref><ref type="bibr" target="#b4">5]</ref>. However, the ability of Gen-AI to produce original materials in various forms has made a significant impact in various sectors such as creative industries, manufacturing, design, entertainment, and education <ref type="bibr" target="#b1">[2,</ref><ref type="bibr" target="#b4">5,</ref><ref type="bibr" target="#b5">6]</ref>.</p><p>Many researchers working for companies and academia have focused their efforts on developing efficient and accessible Gen-AI tools for content creation. Most notably, OpenAI's GPT series, which began with its first release in 2018 and was followed by GPT-2, GPT-3 and GPT-4, as well as its conversational variant, ChatGPT, have significantly impacted the landscape of text generation <ref type="bibr" target="#b0">[1,</ref><ref type="bibr" target="#b6">7]</ref>. GPT is built on the principles of Large Language Models (LLMs) <ref type="bibr" target="#b7">[8]</ref> which are designed to process and generate natural language text. The outstanding performances of LLMs, in synthesizing complex information, whether in the form of text or images, stems from the use of advanced techniques such as positional encoding and attention mechanisms <ref type="bibr" target="#b7">[8]</ref>. 
Moreover, the main core of LLMs are complex neural network architectures like Transformers which represent the state-of-the-art for numerous natural language tasks <ref type="bibr" target="#b8">[9,</ref><ref type="bibr" target="#b9">10]</ref>.</p><p>Later, in 2021, OpenAI continued to push the boundaries of generative AI by releasing DALL⋅E, a tool capable of generating images based on textual descriptions <ref type="bibr" target="#b10">[11]</ref>. While GPT focuses on generating coherent and contextually relevant texts based on input prompts, DALL⋅E integrates linguistic and visual information and extends this capability to visual content generation <ref type="bibr" target="#b10">[11]</ref>. The tool employs the same Transformer architecture as GPT-3 <ref type="bibr" target="#b10">[11]</ref>. Unlike traditional models that handle either text or images, DALL⋅E is a multimodal model <ref type="bibr" target="#b11">[12,</ref><ref type="bibr" target="#b10">11]</ref>, meaning that it can understand both types of data, integrating them in creative ways.</p><p>The achievements reached by GPT and DALL⋅E have significantly influenced the text-to-video domain, culminating in the remarkable capabilities showcased by OpenAI's Sora <ref type="bibr" target="#b12">[13,</ref><ref type="bibr" target="#b13">14,</ref><ref type="bibr" target="#b14">15]</ref> released in 2024. Sora is an AI model capable of creating realistic and imaginative scenes from text instructions <ref type="bibr" target="#b15">[16,</ref><ref type="bibr" target="#b13">14]</ref>. Similarly to GPT and DALL⋅E, Sora can analyze text and understand intricate user directives. The process of generating videos is based on a diffusion Transformer architecture <ref type="bibr" target="#b8">[9,</ref><ref type="bibr" target="#b9">10,</ref><ref type="bibr" target="#b16">17]</ref>. 
This process begins with a video resembling static noise and progressively refines it by removing the noise over many steps introducing details based on the provided text prompt <ref type="bibr" target="#b16">[17]</ref>. Sora is notable for its capability to create up to 1-minute long videos, ensuring a strict adherence to user text instructions while delivering high visual quality and maintaining strong visual coherence, thus allowing users to provide visual contents from, even complex, text narratives <ref type="bibr" target="#b12">[13,</ref><ref type="bibr" target="#b15">16]</ref>. The results of the productions made with Sora are highly realistic and applicable to a multitude of professional fields <ref type="bibr" target="#b14">[15,</ref><ref type="bibr" target="#b13">14,</ref><ref type="bibr" target="#b17">18]</ref> using a prompt that can be as specific and detailed as the user wishes.</p><p>These outstanding results might become controversial. On the one hand, these Gen-AI tools offer tremendous benefits, such as enhancing creativity <ref type="bibr" target="#b18">[19]</ref>. On the other hand, since a false perception of the world is possible by making indistinguishable what is real from what has been produced with Gen-AI tools <ref type="bibr" target="#b19">[20,</ref><ref type="bibr" target="#b20">21]</ref>, they also raise concerns about misinformation leading to the spread of fake news. Therefore, social awareness is necessary for the verification of exposed stimuli <ref type="bibr" target="#b21">[22]</ref>, as well as personal judgement in recognising AI-generated materials <ref type="bibr" target="#b22">[23]</ref>. From an ethical point of view, the use of these models can lead to privacy violations, as they might inadvertently reveal sensitive information embedded in their training data. 
Furthermore, they can perpetuate and even amplify biases present in their training datasets, leading to unfair or discriminatory outcomes <ref type="bibr" target="#b23">[24,</ref><ref type="bibr" target="#b24">25,</ref><ref type="bibr" target="#b25">26]</ref>. The deployment of Gen-AI thus demands careful consideration of these factors, emphasizing transparency, accountability, and the implementation of robust ethical guidelines to mitigate potential harms <ref type="bibr" target="#b26">[27]</ref>.</p><p>This study aims to evaluate the attraction, composition, and realism of Gen-AI videos compared to real videos. According to the manual written by Achi <ref type="bibr" target="#b27">[28]</ref>, multimedia data must adhere to some standards in terms of audiovisual recordings, such as lighting, colour, or scale, which are the most relevant attributes for visual realism <ref type="bibr" target="#b28">[29]</ref>. As a case study, we focus on two landscape videos available on Sora's website. The first video is a recreation of Santorini (Greece), while the second one showcases the Amalfi Coast (Italy). Additionally, we consider two real videos reporting the same content. Through a detailed survey, the study measures attraction via parameters such as illumination, saturation, colourfulness, brightness, and sharpness. Composition is assessed by evaluating the video quality, the presence of shadows, focus, perspective, and shot range. Furthermore, the level of realism of the Sora videos is assessed by determining if the videos appear natural, contain fine details, and resemble drone footage, as well as whether respondents recognize the location and believe the video to be real. Finally, we identify which aspects of attraction and which compositional elements most significantly affect the perceived realism of the Gen-AI videos. For these reasons, we seek answers to the following research questions:</p><p>• R.Q.1. 
How do respondents perceive the attraction and composition of AI-generated videos compared to real videos depicting the same landscapes and environments? • R.Q.2. What are the key attraction and composition elements that influence the perceived realism of Gen-AI videos of landscapes?</p><p>The paper is organized as follows. Section 2 outlines the methodological design of the survey conducted. In Section 3, we analyze the obtained results. Finally, Section 4 discusses these results, offering valuable insights into the research questions and concluding with a discussion of the study's limitations.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">Research methodology</head><p>The research objective is to evaluate the attraction, composition and realism of Gen-AI videos compared to real videos, and therefore a quasi-experiment will be conducted. For this reason, an ad hoc survey including either real or Gen-AI videos of landscapes, has been developed. Since the possibilities of producing videos using Gen-AI tools are still limited and the release of Sora is imminent, two landscape videos have been selected from those available on the Sora website. The methodological approach of this research is to make a first approach in the field of Generative AI in video generation and to study how it affects its use in university students. The first is a recreation of Santorini (Greece) and the second shows the Amalfi Coast (Italy). For the selection of the real videos, a search was carried out on Youtube, selecting those that are in the same location and have similar audiovisual characteristics <ref type="bibr" target="#b29">[30,</ref><ref type="bibr" target="#b30">31]</ref>. All videos were customised to have the same duration and resolution. Figure <ref type="figure" target="#fig_0">1</ref> shows a frame for each video. In the validation process, the experts considered the videos to be similar in both similarity and format. The design of a quasi-experiment is based on the construction of two groups, a control group and an experimental group, to which a stimulus is exposed. <ref type="bibr" target="#b31">[32]</ref> The quasi-experiment design is based on the collection of information by means of a self-administered online survey <ref type="bibr" target="#b32">[33]</ref>. To ensure that the quasi-experiment design fits the research objective, it undergoes validation by expert judges (n=12). Experts in the field of computer science communication and artificial intelligence were selected for this purpose. 
They were provided with a guide explaining in detail the procedure and method of the experiment, as well as the questions in the questionnaire for collecting information. The purpose of this process is to ensure that there is a degree of agreement in terms of univocity and relevance. <ref type="bibr" target="#b33">[34]</ref>. The validation includes the procedure of the quasi-experiment and the questionnaire administered.</p><p>The questionnaire (see Table <ref type="table" target="#tab_0">1</ref>) is designed as an information collection system based on the validated framework proposed in <ref type="bibr" target="#b28">[29]</ref> for the human characterisation of visual realism in images. Since the questionnaire focuses on variables related to videos, the structure of the questions is modified accordingly, while the variables related to realism, attraction, and composition are maintained. The questionnaire is structured in two sections. The first section is designed to collect socio-demographic variables (P1-P5). The second section contains two videos to be evaluated independently in terms of realism, attraction and composition. In particular, the questions P6-P11 aim at evaluating the level of attraction, P12-P15 address the key elements of composition and, finally, P16-P20 focus on realism. The concepts covered by the survey were explained before the questionnaire was carried out in order to reduce bias in the interpretation of the questions. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Realism</head><p>As the chosen method is a quasi-experimental design, the survey is carried out on two groups. The survey for the control group contains two videos of real landscapes recorded by a professional as stimuli, while the survey for the experimental group contains two landscape videos produced by Gen-AI as stimuli. A non-probabilistic sample is selected as the results of the study aim to collect data to get more insights about the phenomenon of video production with Gen-AI tools. University students with a background in visual arts, theatre and music are therefore taking part in the study. Allocation to the individual groups was randomised and in proportion to each other. The data was collected in April 2024. Data was collected from n=62 participants, 28 from the control group and 34 from the experimental group. An online survey was used for data collection. All participants are young university students in the field of computer science, communication and new technologies.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.">Results</head><p>In this section, we report some statistics derived from the survey for both the control and experimental groups in order to seek answers to the research questions R.Q.1 and R.Q.2 outlined in Section 1. The answers to the socio-demographic questions (Q1, Q2, Q3, Q4, Q5 in Table <ref type="table" target="#tab_0">1</ref>) show that the average age of the participants is 21 years, of which 68% are female and 27% are male. 81% of respondents are not employed and 71% of them are Italian. Concerning the variables attraction, composition, and realism, the results are presented below in percentages, distinguishing between real videos and videos generated with Sora.</p><p>We first focus on the attraction variable by analyzing the answers to Q6-Q12. We found significant differences in terms of distribution between real and AI videos when evaluating the "illumination" (Q6). The bar plots are shown in Figure <ref type="figure" target="#fig_1">2</ref>. Participants consider this item, in the case of Sora's Santorini video, to be predominantly unnatural or slightly unnatural (91%), while in the case of the real Santorini video, the lighting is considered more natural and only 32% of respondents consider it slightly unnatural.</p><p>For the rest of the items, such as saturation (Q7), colour (Q8), brightness (Q9) or sharpness (Q10), the differences between the real videos and Sora are more neutral and less significant. In the case of saturation, for example, participants mostly considered the four videos to be quite saturated (50% for the real video of Santorini and 44% for the Sora's one). 
The same applies to colour, where participants mostly rated the videos as slightly colourful, regardless of whether it was a real or AI-generated video (57% in the case of the real Amalfi video; 50% in the case of the real Santorini video; 71% in the case of the AI Amalfi video and 47% in the case of the AI Santorini video). In terms of sharpness, the majority of participants felt that all videos were neither sharp nor blurred. Regarding the quality of the videos (Q11), most of them are categorised as of medium quality, with no significant differences between the distribution of the Sora's videos and the real ones. In summary, if we compare the Sora and real videos, in the case of Santorini there are major differences, especially in the lighting, and less in the saturation or colour elements. We point out that Sora's video of Santorini stands out as very colourful and saturated compared to the other stimuli. In the case of Amalfi, the differences of the distribution between Sora and real videos in terms of saturation, colour and lighting are small.</p><p>We now focus on the composition variable by analyzing the answers to Q13-Q16 which assess the degree of focusing, the perspective and the camera distance. Concerning audiovisual features such as the degree of focusing (Q13), the majority of participants consider real videos better focused compared to Sora's ones. More precisely, the percentages of participants perceiving the videos well focused are: 57% for the real video of Amalfi; 64% for the real video of Santorini; 41% in the video of Sora of Amalfi and 47% in the video of Sora of Santorini. Another important compositional element that was analyzed is perspective (Q14). The barplots are shown in Figure <ref type="figure" target="#fig_2">3</ref>. For the real videos, most of the respondents believe that the perspective appears natural. 
For the Sora videos, however, the range of answers is wider, which indicates that the perspective can be perceived as neither natural nor unnatural. Finally, concerning the camera distance (Q16) all the videos are perceived with a moderately or very distant view.</p><p>By analyzing the answers to the questions about the realism variable (Q16, Q17, Q18, Q19, Q20), it turns out that the majority of participants recognise Sora's video about Santorini as false (35%) or probably false (32%). In the case of the AI generated video of Amalfi, participants predominantly (32%) recognise it as probably true. As with the real videos shown in the control group, the majority of participants recognise the Santorini video as probably true (43%). For the Amalfi video, 36% of participants think it is probably false and 32% think it is probably true (Q20). The results are shown in Figure <ref type="figure" target="#fig_3">4</ref>. For both the real video of Amalfi and its AI-generated counterpart, participants mostly do not recognize the location. In contrast, participants do recognise the location of Santorini for the most part in both experimental groups (Q16). The results are shown in Figure <ref type="figure" target="#fig_4">5</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.">Discussion and Conclusions</head><p>The results of the experiment are based on the assumption that Gen-AI tools for video generation, in particular Sora, are capable of generating realistic content that is practically impossible to differentiate from a real video <ref type="bibr" target="#b34">[35]</ref>.</p><p>In order to answer R.Q.1. "How do respondents perceive the attraction and composition of AI-generated videos compared to real videos depicting the same landscapes and environments?", in Section 3 we reported the results obtained by analyzing some items of the attraction and composition variables. The results reveal distinct perceptions of attraction and composition between AI-generated and real videos, thus highlighting both technological limitations and areas of potential improvement in AI video generation. Concerning the attraction variable, we observed that the lighting in AI-generated videos, particularly Sora's Santorini video, was largely deemed unnatural, in contrast with the more natural lighting perceived in real videos. Despite this, other attributes such as saturation, colour, brightness, and sharpness showed less pronounced differences, indicating that AI-generated videos can achieve a comparable aesthetic quality in these areas. These results suggest that Sora can replicate certain visual aspects, but it can struggle with replicating natural lighting, which is a critical component of realism <ref type="bibr" target="#b28">[29]</ref>. Concerning the composition, the main differences between AI-generated videos and real videos were observed regarding the perspective item. The perspective in AI videos was also perceived less consistently, often seen as neither entirely natural nor unnatural.</p><p>We now try to seek answers to R.Q.2. "What are the key attraction and composition elements that influence the perceived realism of Gen-AI videos of landscapes?". 
It is evident that attraction variable influences the perceived realism of Sora videos. The results indicate that participants could easily distinguish the AI-generated Santorini video due to its unnatural illumination, despite slight differences in saturation and colour. This highlights that elements such as illumination, saturation, and colour are key factors in recognizing an AI-generated video. Conversely, when these elements are closely matched between real and AI-generated videos, as seen with the Amalfi videos, it becomes more challenging for participants to identify the AI-generated content. In these cases, the majority of participants did not recognize the Sora-generated videos as artificial, suggesting that a high degree of similarity in these compositional and attraction elements can enhance the perceived realism of AI-generated videos. The key compositional elements influencing the perceived realism of Gen-AI videos of landscapes include the perspective. Specifically, the natural appearance of perspective in real videos contrasts with the broader range of perceptions for AI-generated videos, where perspectives were often seen as neither natural nor unnatural. Overall, while AI-generated videos are making strides in matching the visual appeal of real videos, significant challenges remain in achieving complete realism, particularly in aspects like lighting and focus that contribute heavily to the perceived naturalness of a scene. These insights underscore the importance of further advancements in AI video synthesis to enhance the authenticity and visual coherence of generated content.</p><p>In terms of location recognition, Sora is able to generate highly realistic videos that resemble real locations, as demonstrated in the cases of Santorini and Amalfi <ref type="bibr" target="#b20">[21,</ref><ref type="bibr" target="#b35">36]</ref>. 
Participants were more likely to recognize Santorini because iconic elements, such as the blue domes, were accurately recreated. In contrast, Amalfi lacks such iconic features, making it less recognizable. The era of Sora has just begun and this initial research on the realism of its videos clearly indicates that they are already difficult to distinguish from reality. However, this research suggests that for video generation to achieve results that closely mimic reality, factors such as attraction and composition must be considered to increase the level of realism. Additionally, there is a need to educate viewers on the potential for AI-generated videos. Consequently, regulations have been developed to control such content, and various studies on Sora emphasize the importance of addressing ethical risks related to misinformation <ref type="bibr" target="#b17">[18,</ref><ref type="bibr" target="#b36">37]</ref>. This study faces several limitations. First of all, the accessibility of the Sora tool. At the time of the experiment, Sora was not available to the general public and the study had to rely on default videos provided by the tool, without the ability to explore its full capabilities through detailed instructions. Furthermore, the study exclusively utilized the Sora tool, as other text-to-video AI tools were deemed less effective in producing realistic results. Consequently, Sora is currently regarded as the most efficient tool for generating highly realistic videos using artificial intelligence. Since there is no research of this kind, this experiment is a first approximation to the work of creating videos with AI tools and the results are not generalisable to the rest of the population. 
To strengthen the findings of this research, replication of the experiment with a larger and more diverse sample of participants across various educational and professional backgrounds is necessary, as well as to compare and study other types of videos that are generated with the Sora tool.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Selected frames of real and Sora's videos. (a) Frame of a real video of the Amalfi Coast. (b) Frame of a Sora's video of the Amalfi Coast. (c) Frame of a real video of Santorini. (d) Frame of a Sora's video of Santorini.</figDesc><graphic coords="3,320.20,441.01,203.08,141.73" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_1"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: The bar plots depict the responses gathered for question Q6 which assesses the factor of "illumination" concerning the variable of attraction.</figDesc><graphic coords="7,72.00,72.00,451.28,325.99" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_2"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: The bar plots depict the responses gathered for question Q14 which assesses the factor of "perspective" concerning the variable of composition.</figDesc><graphic coords="8,72.00,72.00,451.28,325.99" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: The bar plots depict the responses gathered for question Q20 which assesses the realism.</figDesc><graphic coords="9,72.00,72.00,451.28,325.99" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 5 :</head><label>5</label><figDesc>Figure 5: The bar plots depict the responses gathered for question Q16 which assesses the realism.</figDesc><graphic coords="10,72.00,72.00,451.28,325.99" type="bitmap" /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head>Table 1 :</head><label>1</label><figDesc>Survey questions and variables</figDesc><table><row><cell>Question</cell><cell>Items</cell></row><row><cell>ID</cell><cell></cell></row></table></figure>
		</body>
		<back>

			<div type="funding">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Funding</head><p>This work was supported by the Autonomous Community of Madrid (Spain) with a grant for industrial doctorates (IND2022/SOC-23503) under a collaboration agreement with Prodigioso Volcán S.L.; Universidad Rey Juan Carlos (ID 501100007511) with a grant call for Personnel in Training 2020 (PREDOC 20-008). This study was carried out within the MICS (Made in Italy -Circular and Sustainable) Extended Partnership and received funding from the European Union NextGenerationEU (Piano Nazionale di ripresa e resilienza (PNRR) -Missione 4 Componente 2, Investimento 1.3 -D.D. 1551.11-10-2022, PE00000004).</p></div>
			</div>

			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<monogr>
		<author>
			<persName><forename type="first">Y</forename><surname>Cao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Yan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Dai</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><forename type="middle">S</forename><surname>Yu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Sun</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2303.04226</idno>
		<title level="m">A comprehensive survey of ai-generated content (aigc): A history of generative ai from gan to chatgpt</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b1">
	<monogr>
		<author>
			<persName><forename type="first">F</forename><surname>Fui-Hoon Nah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Cai</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Siau</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Chen</surname></persName>
		</author>
		<title level="m">Generative ai and chatgpt: Applications, challenges, and ai-human collaboration</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Generative artificial intelligence: Can chatgpt write a quality abstract?</title>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">E</forename><surname>Babl</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">P</forename><surname>Babl</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Emergency Medicine Australasia</title>
		<imprint>
			<biblScope unit="volume">35</biblScope>
			<biblScope unit="page" from="809" to="811" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Comparing the ideation quality of humans with generative artificial intelligence</title>
		<author>
			<persName><forename type="first">J</forename><surname>Joosten</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Bilgram</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Hahn</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Totzek</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Engineering Management Review</title>
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">Art and the science of generative ai</title>
		<author>
			<persName><forename type="first">Z</forename><surname>Epstein</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Hertzmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Of Human</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Creativity</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Akten</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Farid</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">R</forename><surname>Fjeld</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Frank</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Groh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Herman</surname></persName>
		</author>
		<author>
			<persName><surname>Leach</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Science</title>
		<imprint>
			<biblScope unit="volume">380</biblScope>
			<biblScope unit="page" from="1110" to="1111" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">Generative ai in education and research: Opportunities, concerns, and solutions</title>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">A</forename><surname>Alasadi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">R</forename><surname>Baiz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Chemical Education</title>
		<imprint>
			<biblScope unit="volume">100</biblScope>
			<biblScope unit="page" from="2965" to="2971" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<monogr>
		<author>
			<persName><forename type="first">J</forename><surname>Achiam</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Adler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Agarwal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Ahmad</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Akkaya</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">L</forename><surname>Aleman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Almeida</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Altenschmidt</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Altman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Anadkat</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2303.08774</idno>
		<title level="m">Gpt-4 technical report</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">A survey of gpt-3 family large language models including chatgpt and gpt-4</title>
		<author>
			<persName><forename type="first">K</forename><forename type="middle">S</forename><surname>Kalyan</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Natural Language Processing Journal</title>
		<imprint>
			<biblScope unit="page">100048</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b8">
	<monogr>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">X</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Zhou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Tang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Hou</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Min</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Dong</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2303.18223</idno>
		<title level="m">A survey of large language models</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Transformers: State-of-the-art natural language processing</title>
		<author>
			<persName><forename type="first">T</forename><surname>Wolf</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Debut</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Sanh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Chaumond</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Delangue</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Moi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Cistac</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Rault</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Louf</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Funtowicz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations</title>
				<meeting>the 2020 conference on empirical methods in natural language processing: system demonstrations</meeting>
		<imprint>
			<date type="published" when="2020">2020</date>
			<biblScope unit="page" from="38" to="45" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<monogr>
		<author>
			<persName><forename type="first">J</forename><surname>Betker</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Goh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Jing</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Brooks</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Ouyang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Zhuang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Guo</surname></persName>
		</author>
		<ptr target="https://cdn.openai.com/papers/dall-e-3.pdf2" />
		<title level="m">Improving image generation with better captions</title>
				<imprint>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page">8</biblScope>
		</imprint>
		<respStmt>
			<orgName>Computer Science</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">A survey of multimodal deep generative models</title>
		<author>
			<persName><forename type="first">M</forename><surname>Suzuki</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Matsuo</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Advanced Robotics</title>
		<imprint>
			<biblScope unit="volume">36</biblScope>
			<biblScope unit="page" from="261" to="278" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<monogr>
		<author>
			<persName><surname>Openai</surname></persName>
		</author>
		<ptr target="https://openai.com/index/sora/" />
		<title level="m">Creating video from text sora is an ai model that can create realistic and imaginative scenes from text instructions</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b13">
	<monogr>
		<author>
			<persName><forename type="first">Y</forename><surname>Liu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Yan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Gao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Chen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Yuan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Huang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Gao</surname></persName>
		</author>
		<idno type="DOI">10.48550/arXiv.2402.17177</idno>
		<idno type="arXiv">arXiv:2402.17177</idno>
		<ptr target="https://doi.org/10.48550/arXiv.2402.17177" />
		<title level="m">Sora: A review on background, technology, limitations, and opportunities of large vision models</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b14">
	<monogr>
		<author>
			<persName><forename type="first">R</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Shah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Sun</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Duan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Wei</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Ranjan</surname></persName>
		</author>
		<idno type="arXiv">arXiv:2405.10674</idno>
		<title level="m">From sora what we can see: A survey of text-to-video generation</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b15">
	<monogr>
		<author>
			<persName><forename type="first">T</forename><surname>Brooks</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Peebles</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Holmes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Depue</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Guo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Jing</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Schnurr</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Taylor</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Luhman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Luhman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Ng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Ramesh</surname></persName>
		</author>
		<ptr target="https://openai.com/research/video-generation-models-as-world-simulators" />
		<title level="m">Video generation models as world simulators</title>
				<imprint>
			<publisher>openai</publisher>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">Scalable diffusion models with transformers</title>
		<author>
			<persName><forename type="first">W</forename><surname>Peebles</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Xie</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the IEEE/CVF International Conference on Computer Vision</title>
				<meeting>the IEEE/CVF International Conference on Computer Vision</meeting>
		<imprint>
			<date type="published" when="2023">2023</date>
			<biblScope unit="page" from="4195" to="4205" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<monogr>
		<title level="m" type="main">From text to video with ai: the rise and potential of sora in education and libraries</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">J</forename><surname>Adetayo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">I</forename><surname>Enamudu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">M</forename><surname>Lawal</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">O</forename><surname>Odunewu</surname></persName>
		</author>
		<idno type="DOI">10.1108/LHTN-02-2024-0028</idno>
		<ptr target="https://doi.org/10.1108/LHTN-02-2024-0028" />
		<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
		<respStmt>
			<orgName>Library Hi Tech News</orgName>
		</respStmt>
	</monogr>
</biblStruct>

<biblStruct xml:id="b18">
	<monogr>
		<title level="m" type="main">Generative artificial intelligence enhances creativity</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">R</forename><surname>Doshi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Hauser</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
	<note>Available at SSRN</note>
</biblStruct>

<biblStruct xml:id="b19">
	<monogr>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">Fernández</forename><surname>Mateo</surname></persName>
		</author>
		<title level="m">Realidad artificial. un análisis de las potenciales amenazas de la inteligencia artificial</title>
				<imprint>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<monogr>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">H</forename><surname>Mogavi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Tu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Hadan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">A</forename><surname>Sgandurra</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Hui</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">E</forename><surname>Nacke</surname></persName>
		</author>
		<idno type="DOI">10.48550/arXiv.2403.14665</idno>
		<idno type="arXiv">arXiv:2403.14665</idno>
		<ptr target="https://doi.org/10.48550/arXiv.2403.14665" />
		<title level="m">Sora openai&apos;s prelude: Social media perspectives on sora openai and the future of ai video generation</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">Verificación de los hechos: Aplicación metodológica en el medio de comunicación el bacán</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">E</forename><surname>Suárez-Roca</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><forename type="middle">L</forename><surname>Vélez-Bermello</surname></persName>
		</author>
		<idno type="DOI">10.46296/rc.v5i9.0042</idno>
		<ptr target="https://doi.org/10.46296/rc.v5i9.0042" />
	</analytic>
	<monogr>
		<title level="j">Revista Científica Arbitrada de Investigación en Comunicación, Marketing y Empresa REICOMUNICAR</title>
		<idno type="ISSN">2737-6354</idno>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="page" from="163" to="184" />
			<date type="published" when="2022">2022</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<analytic>
		<title level="a" type="main">Las tecnologías de la información y comunicación en el aprendizaje</title>
		<author>
			<persName><forename type="first">C</forename><surname>Belloch</surname></persName>
		</author>
		<ptr target="https://bit.ly/468T21C" />
	</analytic>
	<monogr>
		<title level="s">Departamento de Métodos de Investigación y Diagnóstico en Educación</title>
		<imprint>
			<biblScope unit="volume">4</biblScope>
			<biblScope unit="page" from="1" to="11" />
			<date type="published" when="2012">2012</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b23">
	<monogr>
		<title level="m" type="main">Exploring the Ethical Implications of Generative AI</title>
		<author>
			<persName><forename type="first">A</forename><surname>Ara</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Ara</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2024">2024</date>
			<publisher>IGI Global</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Generative ai and human-robot interaction: implications and future agenda for business</title>
		<author>
			<persName><forename type="first">B</forename><surname>Obrenovic</surname></persName>
		</author>
		<author>
			<persName><forename type="first">X</forename><surname>Gu</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Wang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Godinic</surname></persName>
		</author>
		<author>
			<persName><forename type="first">I</forename><surname>Jakhongirov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">society and ethics</title>
		<imprint>
			<biblScope unit="page" from="1" to="14" />
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note>AI &amp; SOCIETY</note>
</biblStruct>

<biblStruct xml:id="b25">
	<analytic>
		<title level="a" type="main">The dark side of generative artificial intelligence: A critical analysis of controversies and risks of chatgpt</title>
		<author>
			<persName><forename type="first">K</forename><surname>Wach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">D</forename><surname>Duong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Ejdys</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Kazlauskaitė</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Korzynski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Mazurek</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Paliszkiewicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Ziemba</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Entrepreneurial Business and Economics Review</title>
		<imprint>
			<biblScope unit="volume">11</biblScope>
			<biblScope unit="page" from="7" to="30" />
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">Connecting the dots in trustworthy artificial intelligence: From ai principles, ethics, and key requirements to responsible ai systems and regulation</title>
		<author>
			<persName><forename type="first">N</forename><surname>Díaz-Rodríguez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Del</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ser</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">L</forename><surname>Coeckelbergh</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>De Prado</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Herrera-Viedma</surname></persName>
		</author>
		<author>
			<persName><surname>Herrera</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Information Fusion</title>
		<imprint>
			<biblScope unit="volume">99</biblScope>
			<biblScope unit="page">101896</biblScope>
			<date type="published" when="2023">2023</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<monogr>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">C R</forename><surname>Achi</surname></persName>
		</author>
		<title level="m">Manual de Formación Audiovisual</title>
				<imprint>
			<publisher>Cholsamaj Fundacion</publisher>
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<analytic>
		<title level="a" type="main">Image visual realism: From human perception to machine computation</title>
		<author>
			<persName><forename type="first">S</forename><surname>Fan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T.-T</forename><surname>Ng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><forename type="middle">L</forename><surname>Koenig</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Herberg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Jiang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Z</forename><surname>Shen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Q</forename><surname>Zhao</surname></persName>
		</author>
		<idno type="DOI">10.1109/TPAMI.2017.2747150</idno>
		<ptr target="https://doi.org/10.1109/TPAMI.2017.2747150" />
	</analytic>
	<monogr>
		<title level="j">IEEE transactions on pattern analysis and machine intelligence</title>
		<imprint>
			<biblScope unit="volume">40</biblScope>
			<biblScope unit="page" from="2180" to="2193" />
			<date type="published" when="2017">2017</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<monogr>
		<author>
			<persName><forename type="first">R</forename><surname>Shirley</surname></persName>
		</author>
		<ptr target="https://www.youtube.com/watch?v=Mupom-sgjAU" />
		<title level="m">Top 10 places on the amalfi coast -4k travel guide</title>
				<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<monogr>
		<author>
			<persName><forename type="first">G</forename><forename type="middle">P</forename><surname>Pro</surname></persName>
		</author>
		<ptr target="https://www.youtube.com/watch?v=rXlqSYZOGnQ&amp;t=456s" />
		<title level="m">Santorini, greece -4k uhd drone video</title>
				<imprint>
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">Diseños de investigación experimental</title>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">A R</forename><surname>Galarza</surname></persName>
		</author>
		<idno type="DOI">10.33210/ca.v10i1.356</idno>
		<ptr target="http://dx.doi.org/10.33210/ca.v10i1.356" />
	</analytic>
	<monogr>
		<title level="j">CienciAmérica: Revista de divulgación científica de la Universidad Tecnológica Indoamérica</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<biblScope unit="page" from="1" to="7" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<analytic>
		<title level="a" type="main">Editorial: Diseños de investigación experimental</title>
		<author>
			<persName><forename type="first">C</forename><surname>Ramos-Galarza</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">cienciamérica</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="1" to="7" />
			<date type="published" when="2021">2021</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<analytic>
		<title level="a" type="main">Validez de contenido y juicio de expertos: una aproximación a su utilización</title>
		<author>
			<persName><forename type="first">J</forename><surname>Escobar-Pérez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Á</forename><surname>Cuervo-Martínez</surname></persName>
		</author>
		<ptr target="https://bit.ly/3IlxiDV" />
	</analytic>
	<monogr>
		<title level="j">Avances en medición</title>
		<imprint>
			<biblScope unit="volume">6</biblScope>
			<biblScope unit="page" from="27" to="36" />
			<date type="published" when="2008">2008</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b34">
	<monogr>
		<author>
			<persName><forename type="first">T</forename><surname>Brooks</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Peebles</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Holmes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><surname>Depue</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Guo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Jing</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Schnurr</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Taylor</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Luhman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Luhman</surname></persName>
		</author>
		<title level="m">Video generation models as world simulators</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<analytic>
		<title level="a" type="main">A hero or a killer? Overview of opportunities, challenges, and implications of text-to-video model Sora</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kustudic</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><forename type="middle">F N</forename><surname>Mvondo</surname></persName>
		</author>
		<idno type="DOI">10.36227/techrxiv.171207528.88283144/v1</idno>
		<ptr target="https://doi.org/10.36227/techrxiv.171207528.88283144/v1" />
	</analytic>
	<monogr>
		<title level="j">Authorea Preprints</title>
		<imprint>
			<biblScope unit="volume">10</biblScope>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b36">
	<monogr>
		<author>
			<persName><forename type="first">J</forename><surname>Cho</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><forename type="middle">D</forename><surname>Puspitasari</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Zheng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L.-H</forename><surname>Lee</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T.-H</forename><surname>Kim</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">S</forename><surname>Hong</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Zhang</surname></persName>
		</author>
		<idno type="DOI">10.48550/arXiv.2403.05131</idno>
		<idno type="arXiv">arXiv:2403.05131</idno>
		<ptr target="https://doi.org/10.48550/arXiv.2403.05131" />
		<title level="m">Sora as an AGI world model? A complete survey on text-to-video generation</title>
				<imprint>
			<date type="published" when="2024">2024</date>
		</imprint>
	</monogr>
	<note type="report_type">arXiv preprint</note>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
