<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.0 20120330//EN" "JATS-archivearticle1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-title-group>
        <journal-title>Journal of Machine Learning Research 24 (2023) 1-6. URL: http://jmlr.org/papers/v24/21</journal-title>
        <!-- NOTE(review): this journal-title appears to be a mis-extracted citation string; the article is the PAN 2024 TextDetox overview paper - verify the correct journal metadata -->
      </journal-title-group>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="doi">10.18653/V1/2020.ALW-1.15</article-id>
      <!-- NOTE(review): this DOI belongs to a 2020 ACL workshop paper, not this PAN 2024 overview - confirm the correct DOI -->
      <title-group>
        <article-title>Overview of the Multilingual Text Detoxification Task at PAN 2024</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author">
          <string-name>Daryna Dementieva</string-name>
          <email>daryna.dementieva@tum.de</email>
          <xref ref-type="aff" rid="aff4">4</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Daniil Moskovskiy</string-name>
          <email>daniil.moskovskiy@skoltech.ru</email>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Nikolay Babakov</string-name>
          <email>nikolay.babakov@usc.es</email>
          <xref ref-type="aff" rid="aff6">6</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Abinew Ali Ayele</string-name>
          <email>abinew.ali.ayele@uni-hamburg.de</email>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Naquee Rizwan</string-name>
          <email>nrizwan@kgpian.iitkgp.ac.in</email>
          <xref ref-type="aff" rid="aff1">1</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Florian Schneider</string-name>
          <email>florian.schneider-1@uni-hamburg.de</email>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Xintong Wang</string-name>
          <email>xintong.wang@uni-hamburg.de</email>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Seid Muhie Yimam</string-name>
          <email>seid.muhie.yimam@uni-hamburg.de</email>
          <xref ref-type="aff" rid="aff7">7</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Dmitry Ustalov</string-name>
          <email>dmitry.ustalov@jetbrains.com</email>
          <xref ref-type="aff" rid="aff2">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Elisei Stakovskii</string-name>
          <email>eistakovskii@gmail.com</email>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Alisa Smirnova</string-name>
          <xref ref-type="aff" rid="aff5">5</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Ashraf Elnagar</string-name>
          <email>ashraf@sharjah.ac.ae</email>
          <xref ref-type="aff" rid="aff8">8</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Animesh Mukherjee</string-name>
          <email>animeshm@gmail.com</email>
          <xref ref-type="aff" rid="aff1">1</xref>
        </contrib>
        <contrib contrib-type="author">
          <string-name>Alexander Panchenko</string-name>
          <email>a.panchenko@skoltech.ru</email>
          <xref ref-type="aff" rid="aff0">0</xref>
          <xref ref-type="aff" rid="aff3">3</xref>
        </contrib>
        <aff id="aff0">
          <label>0</label>
          <institution>Artificial Intelligence Research Institute</institution>
          ,
          <addr-line>Moscow</addr-line>
          ,
          <country country="RU">Russia</country>
        </aff>
        <aff id="aff1">
          <label>1</label>
          <institution>Indian Institute of Technology</institution>
          ,
          <addr-line>Kharagpur</addr-line>
          ,
          <country country="IN">India</country>
        </aff>
        <aff id="aff2">
          <label>2</label>
          <institution>JetBrains</institution>
          ,
          <addr-line>Belgrade</addr-line>
          ,
          <country country="RS">Serbia</country>
        </aff>
        <aff id="aff3">
          <label>3</label>
          <institution>Skolkovo Institute of Science and Technology</institution>
          ,
          <addr-line>Moscow</addr-line>
          ,
          <country country="RU">Russia</country>
        </aff>
        <aff id="aff4">
          <label>4</label>
          <institution>Technical University of Munich</institution>
          ,
          <addr-line>Munich</addr-line>
          ,
          <country country="DE">Germany</country>
        </aff>
        <aff id="aff5">
          <label>5</label>
          <institution>Toloka AI</institution>
          ,
          <addr-line>Lucerne</addr-line>
          ,
          <country country="CH">Switzerland</country>
        </aff>
        <aff id="aff6">
          <label>6</label>
          <institution>Universidade de Santiago de Compostela</institution>
          ,
          <addr-line>Santiago de Compostela</addr-line>
          ,
          <country country="ES">Spain</country>
        </aff>
        <aff id="aff7">
          <label>7</label>
          <institution>Universität Hamburg</institution>
          ,
          <addr-line>Hamburg</addr-line>
          ,
          <country country="DE">Germany</country>
        </aff>
        <aff id="aff8">
          <label>8</label>
          <institution>University of Sharjah</institution>
          ,
          <addr-line>Sharjah, UAE</addr-line>
        </aff>
      </contrib-group>
      <pub-date>
        <year>2024</year>
      </pub-date>
      <volume>198</volume>
      <fpage>114</fpage>
      <lpage>124</lpage>
      <abstract>
        <p>Despite different countries' and social platforms' regulations, digital abusive speech persists as a significant challenge. One of the ways to tackle abusive, or, more specifically, toxic language is automatic text detoxification—a text style transfer (TST) task of changing the register of a text from toxic to non-toxic. Thus, in this shared task, we aim to obtain text detoxification models for 9 languages: English, Spanish, German, Chinese, Arabic, Hindi, Ukrainian, Russian, and Amharic. This paper presents the Multilingual Text Detoxification (TextDetox) task, the underlying datasets, the evaluation setups, the submissions from participants, and the results obtained. Warning: This paper contains rude texts that only serve as illustrative examples.</p>
      </abstract>
      <kwd-group>
        <kwd>PAN 2024</kwd>
        <kwd>Multilingual Text Detoxification</kwd>
        <kwd>Text Style Transfer</kwd>
        <kwd>Multilingualism</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec id="sec-1">
      <title>1. Introduction</title>
      <p>
        Models (LLMs) [
        <xref ref-type="bibr" rid="ref2">2</xref>
        ]. The typical approach to dealing with abusive speech on social platforms involves
message blocking [
        <xref ref-type="bibr" rid="ref3">3</xref>
        ]. To address this, numerous toxic and hate speech detection models have been
developed for different languages, e.g., English [
        <xref ref-type="bibr" rid="ref4">4</xref>
        ], Spanish [
        <xref ref-type="bibr" rid="ref5">5</xref>
        ], Amharic [
        <xref ref-type="bibr" rid="ref6">6</xref>
        ], Code-Mixed Hindi [
        <xref ref-type="bibr" rid="ref7">7</xref>
        ], and
many others [
        <xref ref-type="bibr" rid="ref8">8</xref>
        ]. However, recent research indicates a necessity for more proactive moderation of
abusive speech [
        <xref ref-type="bibr" rid="ref9">9</xref>
        ]. One such approach is text detoxification.
      </p>
      <p>
        Within the baselines approaches for automatic text detoxification, multiple unsupervised baselines
were created based on ideas of Delete-Retrieve-Generate [
        <xref ref-type="bibr" rid="ref10">10</xref>
        ], latent style spaces disentanglement [
        <xref ref-type="bibr" rid="ref11">11</xref>
        ],
or conditional generation with Masked Language Modeling [
        <xref ref-type="bibr" rid="ref12">12</xref>
        ]. However, the latest state-of-the-art
outcomes, particularly in English, were attained when parallel data and fine-tuning with text-to-text
generation models were employed [
        <xref ref-type="bibr" rid="ref13 ref14">13, 14</xref>
        ]. At the same time, the availability of such a corpus can be a
challenge for new languages and cross-lingual transfer techniques should be applied [
        <xref ref-type="bibr" rid="ref15">15</xref>
        ].
      </p>
      <p>Toxic Input</p>
      <p>What a f**k is this about?
А н**рена ты здесь это писал?
Та н**уй ти мені впав, скотина ти така)))</p>
      <p>Was für ein besch**senes Jahr</p>
      <p>Este país se va a la m**rda
ط**ارﺷ ﺎﯾ ﮫﺗزﺎﻧﺟﺑ اوﺷﻣﺗو لﯾﺗﻘﻟا اوﻠﺗﻘﺗ</p>
      <p>卧槽，抓到了！
ये माद**द डरे हुए लग रहे है ?
Detoxified Output</p>
      <p>What is this about?
А зачем ты здесь это писал?
Та навіщо ти мені потрібен</p>
      <p>Was für ein schlechtes Jahr.</p>
      <p>Cosas van muy mal en este país
ﮫﺗزﺎﻧﺟﺑ اوﺷﻣﺗو لﯾﺗﻘﻟا اوﻠﺗﻘﺗ</p>
      <p>天啊，抓到了！
ये लोग डरे हुए लग रहे है ?
አንተ ቆሻሻ በዚህ ወቅት አይንህን ማየት አልፈልግም</p>
      <p>አንተ ጥሩ ሰው አይደለህም በዚህ ወቅት አንተን ማየት አልፈልግም</p>
      <p>
        In this shared task, we explored both setups—cross-lingual and multilingual one—providing new
multilingual parallel text detoxification dataset for 9 languages [
        <xref ref-type="bibr" rid="ref16">16</xref>
        ]. The remainder of this paper is
structured as follows. Section 2 gives an overview of the TextDetox shared task description. Section 3
provides the full overview of the new multilingual parallel text detoxification dataset collection per
each language. In the following sections, the evaluation setups essentials are described—baselines
in Section 4, automatic evaluation setup in Section 5, and human evaluation setup in Section 6. The
submissions from participants are described in Section 7. Section 8 provides the details about final
results—both automatic (Section 8.1) and human (Section 8.2) evaluation leaderboards. Finally, Section 9
concludes the paper.
      </p>
      <p>All the resources produced from the task are listed at the shared task page 1 and are also mentioned
in the corresponding sections.</p>
    </sec>
    <sec id="sec-2">
      <title>2. Shared Task Description</title>
      <p>Here, we provide the shared task main definitions—how we understand toxicity, text style transfer task,
cross-lingual and multilingual setup, and the competition rules.</p>
      <sec id="sec-2-1">
        <title>1https://pan.webis.de/clef24/pan24-web/text-detoxification.html</title>
        <sec id="sec-2-1-1">
          <title>2.1. Task Definition</title>
          <p>
            Definition of Toxicity While there can be different types of toxic language in conversations [17, 18],
i.e. sarcasm, hate speech, threats, in this work, we include samples with substrings that are commonly
referred to as vulgar or profane language [
            <xref ref-type="bibr" rid="ref13">19, 13</xref>
            ] while the whole main message can be both neutral
and toxic, but not hateful with direct insult of individuals or groups of people.
          </p>
          <p>
            Text Detoxification as Text Style Transfer In this work, we adapt the formal task definition of the
text style transfer described in [
            <xref ref-type="bibr" rid="ref13">20, 21, 13</xref>
            ]:
          </p>
          <p>Having a set of styles S and a corpus of texts D, a text style transfer (TST) model is a function
f : S × S × D → D that, given a source style s_src, a target style s_tgt, and an input text x,
produces an output text x′ such that:
• The style of the text changes from the source style s_src to the target style s_tgt and is measured by
a style classifier c: c(x′) ≠ c(x), c(x′) = s_tgt;
• The content of the source text is saved in the target text as much as required for the task and
estimated by a content similarity function SIM: SIM(x, x′) ≥ t_SIM;
• The fluency of the target text achieves the required level according to the fluency estimator FL:
FL(x′) ≥ t_FL,
where t_SIM and t_FL are the threshold values for the content preservation (SIM) and fluency (FL) functions
and can be adjusted to the specific task. In our task, the source style s_src is toxic and the target style
s_tgt is non-toxic.</p>
          <p>Cross-lingual Text Detoxification As parallel text detoxification corpora might not be available
for any language, one of the important tasks is to explore cross-lingual text detoxification knowledge
transfer. In this case, we assume that training data is available for the resource-rich language (i.e.
English) and the task is to obtain a text detoxification system for a new language.</p>
          <p>Multilingual Text Detoxification If parallel corpora are available for multiple languages, then both
monolingual text detoxification models per language and a multilingual model for all languages can be
obtained.</p>
        </sec>
        <sec id="sec-2-1-2">
          <title>2.2. Competition Rules</title>
          <p>The shared task timeline was divided into two phases—development and test.</p>
          <p>
            Development Phase For the first phase, only training parallel data for English and Russian from
previous works [
            <xref ref-type="bibr" rid="ref13">13, 22</xref>
            ] were available, aiming to encourage participants to explore cross-lingual transfer techniques.
Test Phase During the test phase, parallel text detoxification corpora were available for all target
languages. Participants were invited to submit monolingual and multilingual solutions.
Leaderboards During both phases, the leaderboards based on automatic evaluation were available.
We used the CodaLab platform [23]2 (and TIRA [24] as a backup platform). However, despite having powerful
models capable of classifying texts and embedding their meanings, human judgment remains superior
for making final decisions in the text detoxification task [25]. Thus, based on a test part subset, we
performed human evaluation of the participants' submissions. The final leaderboard was based on
the human judgement results.
          </p>
        </sec>
      </sec>
      <sec id="sec-2-2">
        <title>2https://codalab.lisn.upsaclay.fr/competitions/18243</title>
      </sec>
    </sec>
    <sec id="sec-3">
      <title>3. Multilingual Parallel Text Detoxification Dataset</title>
      <p>For each of our 9 target languages, we prepared a parallel text detoxification corpus. We asked experts
and native speakers to contribute for corpora collection. Further, we describe the collection details
per each language: English (Section 3.1), Russian (Section 3.2), Ukrainian (Section 3.3), Spanish
(Section 3.4), German (Section 3.5), Hindi (Section 3.6), Amharic (Section 3.7), Arabic (Section 3.8), Chinese
(Section 3.9). All the instructions per language are available online.3 We also open-source the obtained
resources for the public usage.4</p>
      <p>For all the data, we adapt the concept of English ParaDetox [26] collection pipeline as it was designed
to automate the data collection as well as verification with crowdsourcing. The pipeline consists of
three tasks:
Task 1: Rewrite text in a polite way Annotators need to provide the detoxified paraphrase of the
text so it becomes non-toxic and the main content is saved or to skip paraphrasing if the text is
not possible to rewrite in non-toxic way;
Task 2: Do these sentences mean the same? Check if the content is indeed the same between the
original toxic text and its potential non-toxic paraphrase;
Task 3: Is this text offensive?</p>
      <sec id="sec-3-1">
        <title>Verification of the provided paraphrase if it is indeed non-toxic.</title>
        <p>In the same manner, each language stakeholder asked the annotators to rewrite the toxic samples
verifying the main three criteria: (i) the new paraphrase should be non-toxic; (ii) the content should be
saved as much as possible; (iii) the resulted text should be fluent but may contain some minor mistakes
(as the majority of the original toxic samples are examples from posts from social networks). The
rewriting of the texts and verification of their quality could have been done either via crowdsourcing or
via manual annotation. The main goal for each language was to obtain 1000 parallel pairs that were
later split into dev and test sets.</p>
        <p>Data Preprocessing For all languages, we maintain the length of samples as sentences of around
5-20 tokens. Also, if a text sample is from a social network, we anonymize any mentioning of usernames
and links.</p>
        <sec id="sec-3-1-1">
          <title>3.1. English</title>
          <p>
            For English, we reused the data from English ParaDetox dataset [
            <xref ref-type="bibr" rid="ref13">13</xref>
            ] and additionally manually marked
up approximately 400 pairs to form a validation dataset of 1000 examples.
3.1.1. Input Data Preparation
For EnParaDetox, the original toxic texts were taken from Jigsaw toxicity identification challenge train
dataset [27]. We have considered only texts labeled as toxic and severe toxic.
3.1.2. Annotation Process
The training and validation sets of EnParaDetox were acquired through crowdsourcing via Toloka5
platform with fluent English speakers. Additionally, we employed annotators who are fluent in English
and hold a Masters degree in Computer Science to compile additional samples to the test set.
3https://github.com/textdetox/textdetox_clef_2024/tree/main/instructions/paradetox_collection
4https://huggingface.co/textdetox
5https://toloka.ai
          </p>
        </sec>
        <sec id="sec-3-1-2">
          <title>3.2. Russian</title>
          <p>As for English, training and validation data were previously available from previous
work [22, 28]. We reused this data and manually annotated some additional toxic examples taken from
various toxicity datasets.</p>
          <p>Input Toxicity Data The original toxic samples were taken from two binary toxicity classification
Kaggle Toxic Comments datasets [29, 30].
3.2.1. Annotation Process
The training and validation sets of RuParaDetox were acquired through crowdsourcing via Toloka
platform with fluent Russian speakers. Additionally, we employed annotators who are native in Russian
and hold a Masters degree in Computer Science to compile additional samples to the test set.</p>
        </sec>
        <sec id="sec-3-1-3">
          <title>3.3. Ukrainian</title>
          <p>We used the data presented in MultiParaDetox paper [28] providing the main details of data collection:
Input Toxicity Data For the Ukrainian language, there was no existing binary toxicity classification
corpus. Therefore, we filtered explicitly toxic samples containing obscene lexicon from the predefined
list [31] within the Ukrainian Tweets Corpus [32].
3.3.1. Annotation Process
We adapt ParaDetox [26] collection pipeline and verified the data quality via crowdsourcing. We utilized
the Toloka platform for crowdsourcing tasks in Ukrainian. The annotators, who were native Ukrainian
speakers, underwent an examination before starting the tasks.</p>
        </sec>
        <sec id="sec-3-1-4">
          <title>3.4. Spanish</title>
          <p>We used the data presented in MultiParaDetox paper [28] providing the main details of data collection:
Input Toxicity Data For Spanish, we selected samples for annotation from three datasets: hate
speech detection ones [33, 34] as well as filtered by keywords Spanish Tweets corpus [35].
3.4.1. Annotation Process
We adapt ParaDetox [26] collection pipeline and verified the data quality via crowdsourcing. We utilized
the Toloka platform for crowdsourcing tasks in Ukrainian. The annotators, who were native Spanish
speakers, underwent an examination before starting the tasks.</p>
        </sec>
        <sec id="sec-3-1-5">
          <title>3.5. German</title>
          <p>German ParaDetox was collected with several annotators with manual quality verification:
3.5.1. Input Data Preparation
The German language source data in this work is based on three datasets containing toxic, offensive, or
hate speech comments on social media about primarily political events in Germany or the US. For the
two datasets from the GermEval 2018 [36] and GermEval 2021 [37] shared tasks, we used data from
both the test and the train split and filtered it as follows. For the GermEval 2018 data, we only used
samples labeled with the coarse class “OFFENSE” whereas for the GermEval 2021 data – which contains
different labels – we only used samples annotated with the “Sub1_Toxic” class. The third dataset [38]
was filtered so only samples were kept where both expert annotators classified the samples as hate
speech.</p>
          <p>The data from the three datasets was merged and deduplicated via exact string matching. Furthermore,
we removed all samples that included less than 5 or more than 30 white-space separated tokens.
3.5.2. Annotation Process
To create the final parallel detoxified German dataset, we hired two native German annotators. Annotator
A is a female born in 1994 who holds a Master of Arts degree in Social Sciences, and Annotator B is a
male born in 1992 who holds a Master of Science degree in Computer Science. The data was distributed
so that each sample was transcribed by only one of the annotators.
3.6. Hindi
Hindi dataset was collected manually by a native-speaker annotator gaining data from multiple sources:
3.6.1. Input Data Preparation
Input Toxicity Data We used the HASOC dataset created at FIRE 2019 [39] as source for Hindi
language. Contents in this dataset are relevant within Indian subcontinent which are collected from
various social media platforms prevalent in India. In this dataset, hostile posts are divided into HATE
SPEECH, OFFENSIVE and PROFANE. For curation, posts containing OFFENSIVE and PROFANE contents
in train and test splits were used. 1455 PROFANE posts (1237 train + 218 test) and 873 OFFENSIVE posts
(676 train + 197 test) were chosen to prepare detoxifiable toxic data for our task.</p>
          <p>Input Preprocessing On a total of 2328 samples, we first performed deduplication via exact string
matching. Mentions, links and emojis were also removed as part of this step.
3.6.2. Annotation Process
Annotation Task(s) The posts after input preprocessing were manually verified. Those with less than
5 white-space separated tokens were removed and which had more than 25 white-space separated tokens
were re-framed to bring them down to this limit. Toxicity and meaning of the posts were unchanged
during this re-framing. These posts were then bifurcated into detoxifiable and non-detoxifiable labels.
The manual re-framing and bifurcation were carried out by an NLP researcher with working experience on
hate/toxic speech.</p>
          <p>Out of 2328 samples, 1007 samples were marked as detoxifiable. From these detoxifiable samples,
we carefully sampled 24 data points and detoxified them. These detoxified samples were evaluated by
two experts who are native Hindi language speakers to provide precise samples to the annotators for
detoxifying the whole dataset. Annotators were guided based on expert prepared samples and were
asked to re-write toxic pairs in a non-toxic manner, keeping the meaning of the original post unchanged.
Detoxification was carried out by two annotators and we provide their details in the corresponding
subsection.</p>
          <p>Annotators One male NLP researcher working in the field of hate/toxic speech and another female
student enrolled in a Bachelor’s degree and having working knowledge of Machine Learning, were
employed to carry out the detoxification of whole dataset. Both annotators are Indian, native Hindi
speakers and are well versed with the topicality covered in the dataset.</p>
        </sec>
        <sec id="sec-3-1-6">
          <title>3.7. Amharic</title>
          <p>
            We compiled new Amharic ParaDetox datasets with the following annotation details, based on prior
studies of hate and offensive language:
3.7.1. Input Data Preparation
The Amharic ParaDetox dataset is derived from merging two pre-existing studies conducted on the
X/Twitter datasets [
            <xref ref-type="bibr" rid="ref6">6, 40</xref>
            ]. The dataset introduced by Ayele et al. [40] was initially annotated into
categories of hate, offensive, normal, and unsure by three native speaker annotators, with the gold labels
determined through a majority voting scheme. In contrast, the dataset presented by Ayele et al. [
            <xref ref-type="bibr" rid="ref6">6</xref>
            ]
was annotated by two native speakers, with a third adjudicator annotator deciding the gold labels for
instances where there was no majority consensus. We extracted a subset of these datasets labeled as
offensive to create the new Amharic ParaDetox dataset and subsequently reworked this subset using
new annotators to determine if the messages could be detoxified and to present non-toxic versions of
each message.
          </p>
          <p>
            Input Toxicity Data The input toxicity data is entirely sourced from the two previous studies, namely
Ayele et al. [
            <xref ref-type="bibr" rid="ref6">6</xref>
            ] and Ayele et al. [40], and has been adapted to meet the requirements of the ParaDetox
task.
3.7.2. Annotation Process
Annotation Task(s) We customized the Potato-POrtable Text Annotation TOol6 and utilized it for
the annotation of Amharic ParaDetox dataset. Annotators were provided annotation guidelines, took
hands-on practical training, completed independent training tasks before the main annotation task.
          </p>
          <p>We conducted pilot annotation of 125 sample items with three native Amharic speaker annotators
and evaluated the annotation quality with experts and annotators together in a group meeting to
improve the understanding of annotators for the main task. The main annotation task comprises
2,995 tweets, each annotated by one annotator. Annotators were asked to classify each tweet into two
broad categories, detoxifiable and non-detoxifiable. For the detoxifiable category, annotators are asked
to detoxify and re-write the text. For non-detoxifiable tweets, annotators choose non-detoxifiable and
select a reason — it is hate speech, it is normal speech, or indeterminate — to decide the label.
Annotators Annotators have previous hate speech annotation experience and already hold Master's
degrees in Computer Science. Only two of the annotators were involved in the main annotation task,
where both of them are university lecturers and have basic knowledge of natural language processing
tasks. One of the annotators is from Adama Science and Technology University with experience of
15 years of teaching Computer Science, who is female. The other annotator is a male, who has been
teaching Computer Science for over 12 years at Kotebe University of Education.</p>
        </sec>
        <sec id="sec-3-1-7">
          <title>3.8. Arabic</title>
          <p>3.8.1. Input Data Preparation
Arabic ParaDetox was collected with several annotators with manual quality verification:
The Arabic ParaDetox dataset was created by combining parts of several existing datasets along with the
Arabic-translated version of the Jigsaw dataset [27]. It includes the Levantine Twitter Dataset for Hate
Speech and Abusive Language (L-HSAB) [41], which focuses on Levantine dialects, and the Tunisian
Hate and Abusive Speech (T-HSAB) dataset [42], which targets Tunisian dialects. It also incorporates the
OSACT dataset [43] and the Arabic Levantine Twitter Dataset for Misogynistic Language (LeT-Mi) [44],
which specifically addresses gender-based abuse. These resources combine to form the Arabic ParaDetox
dataset, aimed at aiding the development of toxicity classifiers capable of handling Arabic content
across various dialects and contexts.
3.8.2. Annotation Process
Annotators The detoxification process was conducted by three annotators, each with a PhD. The
team includes two males and one female, all of whom have a strong interest in computational linguistics.
These native Arabic speakers possess a deep understanding of the subjects encompassed within the
dataset. Each text sample was transcribed by two of the annotators to ensure accuracy and consistency
in the data.</p>
        </sec>
        <sec id="sec-3-1-8">
          <title>3.9. Chinese</title>
          <p>3.9.1. Input Data Preparation
We collected new Chinese ParaDetox datasets with the following annotation details:
Input Toxicity Data The Chinese ParaDetox dataset is derived from TOXICN [45], a recently released
Chinese toxic language dataset. TOXICN was compiled from social media platforms and comprises
12,011 comments addressing several sensitive topics, including gender, race, region, and LGBTQ issues.
From this dataset, we extracted a subset based on multiple criteria: the number of toxic words, the ratio
of toxic words in the comments, the length of comments, and the toxic scores of comments.
Input Preprocessing We set thresholds for the criteria mentioned above: the number of toxic words
ranged from 1 to 5, the ratio of toxic words in comments was less than 0.5, and the length of comments
ranged from 3 to 50 words, ensuring suitability for annotators to rewrite them. Following these criteria,
we extracted 1,516 samples from the training set and 231 samples from the test set.</p>
          <p>Subsequently, we employed a pre-trained toxic classifier [ 45] to compute the toxic scores of the
selected comments, using a threshold score of 0.978 to filter the candidates. Ultimately, we collected
1,149 samples from the training set and 231 samples from the test set, resulting in a total of 1,380 samples
deemed suitable for annotation.
3.9.2. Annotation Process
Annotation Tasks For data annotation and verification, we employed a specifically designed
three-task pipeline:
Task 1: Determine if the sentences are toxic or neutral. Annotators were required to choose one
of three options: the given sentence is neutral, toxic but can be rewritten, or toxic and cannot
be rewritten. The last option was included based on the observation that some toxic texts are
impossible to rewrite in a non-toxic manner.</p>
          <p>Task 2: Rewrite sentences in a non-toxic style. Annotators were instructed to create detoxified
versions of the toxic sentences identified in Task 1. They were advised to retain the main content
of the original sentences and rewrite the toxic words in a polite manner.</p>
          <p>Task 3: Cross-check verification. The rewritten sentences from Task 2 were cross-distributed to
different annotators for verification. The goal was to ensure the rewritten sentences were
non-toxic and adhered to our guidelines. If annotators selected the “No” option, indicating the sentence
did not meet the criteria, a further meta-rewrite process was initiated.</p>
          <p>From the 1,380 toxic samples, 1,031 samples were successfully detoxified and verified, with 861 from
the training set and 170 from the test set. The remaining 349 samples were either considered non-toxic
or toxic but could not be rewritten.</p>
          <p>Annotators For the detoxification process, we hired three native Chinese annotators. Two female
annotators, both aged 22, hold Bachelor’s degrees in Engineering, and a male annotator, aged 32, holds a
Master’s degree in Computer Science. All annotators are native Chinese speakers residing in mainland
China, ensuring they deeply understand the Chinese language and the detoxification task.</p>
        </sec>
        <sec id="sec-3-1-9">
          <title>3.10. Final Dataset</title>
          <p>
            The full picture of the collected ParaDetox data for all target languages is presented in Table 1. While
the methods of collecting human annotations vary across languages—some data were gathered via
crowdsourcing, others by hiring local native speakers—the quality of the texts was uniformly verified
by experts to ensure three key attributes as introduced in [
            <xref ref-type="bibr" rid="ref13">46, 13</xref>
            ]: (i) the style of new paraphrases is
genuinely non-toxic, (ii) the main content is preserved, and (iii) the new texts are fluent.
          </p>
          <p>For each language for the shared task’s phases:
• During the development phase: 400 only toxic parts were available for participants to perform
cross-lingual experiments.
• During the test phase: (i) 400 ParaDetox instances were fully released; (ii) participants should
provide their final solutions for 600 toxic parts of the test dataset.</p>
          <p>
            For English and Russian during all phases, additional training parallel datasets were available released
from previous work [
            <xref ref-type="bibr" rid="ref13">13, 22, 28</xref>
            ]. The fully released development part of the data7 and the toxic-only instances of the
test part8 can be found online.
          </p>
        </sec>
      </sec>
    </sec>
    <sec id="sec-4">
      <title>4. Baselines</title>
      <p>We provide four baselines for our shared task: (i) trivial Duplicate baseline; (ii) a rule-based Delete
approach; (iii) Backtranslation pipeline that reduces the task to the monolingual one; (iv) finally,
finetuned for the downstream task on the dev dataset mT5 instance. The code for all the baselines is
available online.9</p>
      <sec id="sec-4-1">
        <title>7https://huggingface.co/datasets/textdetox/multilingual_paradetox 8https://huggingface.co/datasets/textdetox/multilingual_paradetox_test 9https://github.com/pan-webis-de/pan-code/tree/master/clef24/text-detoxification/baselines</title>
        <p>Duplicate Trivial baseline: the output sentence is a copy-paste of the input sentence. This baseline
has 1.0 (or 100%) SIM score by definition.</p>
        <p>
          Delete For the first unsupervised baseline, we perform an elimination of obscene and toxic substrings
from a text according to the predefined lists of keywords. For the shared task, we collected and compiled
together the lists of such toxic keywords for all target languages based on openly available sources
(see Table 2). The amount of toxic keywords per language differs, which displays the diversity of
morphological forms and variations of toxicity expressions across languages. For participants and
further public usage, we release our compiled list online.10
Backtranslation As for a more sophisticated unsupervised baseline, we perform translation of
non-English texts into English with NLLB [19] instance11 and then perform detoxification with the fine-tuned
on English ParaDetox train part BART [
          <xref ref-type="bibr" rid="ref13">13</xref>
          ] instance.12
Fine-tuned mT5 Specifically for the test phase, we fine-tuned the multilingual text-to-text generation
model mT5 [51]. We tuned the mT5-XL13 on the released for the test phase parallel development part of
the presented multilingual data.
        </p>
      </sec>
    </sec>
    <sec id="sec-5">
      <title>5. Automatic Evaluation Setup</title>
      <p>
        We adopt the monolingual evaluation pipelines from [
        <xref ref-type="bibr" rid="ref13">13, 22</xref>
        ] to our multilingual setup and provide the
detailed description below. We evaluate the outputs based on three parameters—style of text, content
preservation, and conformity to human references—combining them into the final Joint score. The
evaluation script is available online.14
Style Transfer Accuracy (STA) ensures that the generated text is indeed more non-toxic. To prepare
a model for this metric that covers our target languages, we subsampled 5 000 samples—2 500 toxic and
2 500 neutral—from toxicity classification corpora for each language (see references in Table 1) that
were not used for ParaDetox data collection. We released15 this compiled corpus for participants as an
additional dataset for experiments and fine-tuned XLM-R [ 52] large instance for the binary toxicity
10huggingface.co/datasets/textdetox/multilingual_toxic_lexicon
11https://huggingface.co/facebook/nllb-200-distilled-600M
12https://huggingface.co/s-nlp/bart-base-detox
13https://huggingface.co/google/mt5-xl
14https://github.com/pan-webis-de/pan-code/blob/master/clef24/text-detoxification/evaluate.py
15https://huggingface.co/datasets/textdetox/multilingual_toxicity_dataset
classification task. The model is also available for the public usage 16 and is used in the shared task to
estimate the level of non-toxicity in the texts.
      </p>
      <p>Content Similarity (SIM) is the cosine similarity between LaBSE17 embeddings [53] of the source
texts and the generated texts.</p>
      <p>
        Fluency (ChrF1) is used to estimate the proximity of the detoxified texts to human references. While
in several previous works, language acceptability classifiers based on CoLA-like corpora were utilized for
fluency estimation [
        <xref ref-type="bibr" rid="ref13 ref15">13, 15</xref>
        ], the recent work [25] also showed that reference-based metrics achieved high
correlations with human evaluation. Thus, we use an implementation of ChrF1 score from sacrebleu
library [54].
      </p>
      <p>Joint score (J) is the aggregation of the three above metrics. The metrics STA, SIM and ChrF1 are
subsequently combined into the final J score used for the final ranking of approaches. Given an input
toxic text x<sub>i</sub> and its output detoxified version y<sub>i</sub>, for a test set of n samples:</p>
      <p>
        J = (1/n) ∑<sub>i=1</sub><sup>n</sup> STA(y<sub>i</sub>) · SIM(x<sub>i</sub>, y<sub>i</sub>) · ChrF1(y<sub>i</sub>, r<sub>i</sub>),
where STA(y<sub>i</sub>), SIM(x<sub>i</sub>, y<sub>i</sub>), ChrF1(y<sub>i</sub>, r<sub>i</sub>) ∈ [0, 1] for each text detoxification output y<sub>i</sub>.
      </p>
    </sec>
    <sec id="sec-6">
      <title>6. Human Evaluation Setup</title>
      <p>For the test set, we performed the human evaluation to obtain final judgements on the participants’
systems. The details and instructions of the annotation setups are available for the public usage.18</p>
      <sec id="sec-6-1">
        <title>6.1. General setup</title>
        <p>We used Toloka19 crowdsourcing platform for manual evaluation of automatic detoxification. For each
project, the annotators who indicated the knowledge of the necessary language could access the tasks.
Quality Control First, the annotators underwent a rigorous selection process, starting with a training
pool of 20 tasks with predefined answers and instructional feedback. Following this, annotators faced an
examination pool comprising 10 tasks with known answers. Only those achieving a minimum accuracy
of 65% in both the training and examination pools were permitted to proceed to the main evaluation
tasks.</p>
        <p>During the execution of the real tasks, annotators’ performance was meticulously monitored using
several measures. Response speed was a critical factor; annotators who responded rapidly (less than 15
seconds per page) to three consecutive task pages were permanently banned. Additionally, we evaluated
the answers of the annotators to mixed tasks with known true answers. Those who demonstrated low
performance, were defined as achieving less than 65% accuracy, were required to undergo re-examination.
Furthermore, annotators who skipped three task pages were also banned from the study.
Evaluation Dataset We selected 100 random original toxic samples per each language from the
test part of our dataset. Each pair was annotated within a separate project dedicated to the respective
language. These projects were categorized into three distinct types, which will be described in detail in
the following section.
16https://huggingface.co/textdetox/xlmr-large-toxicity-classifier
17huggingface.co/sentence-transformers/LaBSE
18https://github.com/textdetox/textdetox_clef_2024/tree/main/instructions/human_evaluation
19https://toloka.ai
</p>
      </sec>
      <sec id="sec-6-2">
        <title>6.2. Annotation projects and corresponding metrics</title>
        <p>In general, the concept of the human evaluation mirrored the approach used in the automatic evaluation.
Each project type focused on assessing one of the three key qualities of detoxification: style transfer
accuracy, content similarity, or fluency. Below, we provide detailed descriptions of each project type
and its specific focus within the evaluation process.</p>
        <p>popular scWrheiechn tseixzteis.8m3o%re oofffeunsseivres?use mobile
Style Transfer Accuracy To measure style transfer accuracy, we employed a pairwise comparison
betweeGnentehreal otarsigksinal toxic text and the generated detoxified text. Annotators were tasked with
determining which text was more toxic: the left text, the right text, or neither. An illustration of this task
can be WfouhndiicnFhigutree2.xt is more offensive? - golden pairs
To ePnuhbalniccdeersecarliipsmtio,nw: Ienrdaicnadtoemwihziecdhttheextsiesquence of original and detoxified texts. Annotators’ votes
more offensive
were tPhreivnatceocnovmermteednitn: ttooxincu_pmaeirrwicisael values using the following logic: if the original toxic text was
deemedWmhicohretetxotxisicm,tohree ovfofetenswivea?s recorded as 1; if the detoxified text was considered more toxic, the
vote was recorded as 0; if “none of the texts” was selected, the vote was recorded as 0.5.</p>
        <p>Which of the texts is more offensive?</p>
        <p>Labeling completed!
1)</p>
        <p>StaIrtadmateN: Manaycy10a,2n0d24Il9ik:3e4tAoMruin people's fun.</p>
        <p>Accepted 100% (38 of 38)
2) I am Nancy and I like to fuck up people's fun.</p>
        <p>17
Tolokers submitted task
0
Skipped task suites
0
Expir
Overview Efficiency Statistics Settings</p>
        <p>Figure 2: The English interface example of crowdsourcing Task 1: pairwise toxicity comparison.</p>
        <p>Which of the texts is more offensive?</p>
        <p>Content Similarity For the content similarity metric, we presented pairs of texts in a fixed order
(the toxic phrase followed by the detoxified paraphrase) and asked annotators to indicate whether the sense
of the two texts was similar. Annotators’ responses were binary, with “yes” indicating that the sense
was similar and “no” indicating otherwise. The responses were mapped to numerical values, with a
“yes” response being recorded as 1 and a “no” response being recorded as 0. An illustration of this task
can be found in Figure 3.</p>
        <p>Fluency For the fluency metric, sentences were evaluated individually rather than in pairs. Annotators
were asked whether the text was intelligible and correct, with three possible responses: yes, partially,
and no. Each response was mapped to a numeric value: “yes” was scored as 1, “partially” as 0.5, and “no”
as 0. The fluency score for a pair of texts was determined by comparing the scores of the detoxified text
to the original text. If the detoxified text received a higher or equal fluency score, the pair was scored
1; otherwise, it was scored as 0. An illustration of this task can be found in Figure 4.</p>
        <p>Joint Score The final scores for each pair within each project were calculated by weighting them
according to the skill of the crowdsourcing annotators. This skill was dynamically assigned based on
the quality of their responses to both the tasks with known answers and the real tasks. The logic for
calculating the Joint score in the manual evaluation mirrors that of the automatic evaluation. For each</p>
        <p>Minimum time per task suite — 2
Public description: Check if the meanings of the two sentences are close or not.</p>
        <p>Private comment: content
5/31/D2o4th,e1se1s:e3n6tenPceMs mean the same?
Toloka: Data solution
TryDo otheusettwtoasesntkenscesaPmrseoajnaetchetTssoamleUotshkienger?sr aSnkildls rePvroifeilew rMeesssualgtess</p>
        <p>Labeling completed!
Docs &amp; Resources
Look for pSotsarstdibatlee: Mparyo2b0l,e2m02s4 3t:h0a7PtMTolokers might run into and fix them for best results. Check the</p>
        <p>Dear Greenman, As usual you have proven to be an arse hole
popular screen size. 83% of users use mobile devices when completing tasks
Projects Is this sentence grammatical? Is this sentence grammatical? - golden pairs
Is t17his sentence16grammatical0? - golden pa3irs
Public description: Check if the sentence is understandable and grammatically correct.</p>
        <p>Inter1ested TYoeloskers2</p>
        <p>No</p>
        <p>Tolokers submitted task</p>
        <p>Skipped task suites</p>
        <p>Expired
Private comment: fluency
Is this sentence grammatical?
No way number bricks fit in a car</p>
        <p>Lsatilblsetalnindigngcuop mforpyoleurtelyidng! crazy leader who loves himself and
Sntaorbtoddaytee:lsMe .</p>
        <p>ay 14, 2024 10:04 AM
Is the sentence intelligible and correct?
y
AccepteYdESst,iltlh0deerefeanrdeinngoothfmeiisrtalykinegs aonrdmsineolfrismhisletaakdeesr.(punctuation, casing)</p>
        <p>10 % (39 39)
p39
Task1suites
n</p>
        <p>PARTIALLY, mistak1e0s0do not hamper understanding the text</p>
        <p>Languages</p>
        <p>No General tasks</p>
        <p>Yes 2
NO, mistakes make it difficult to understand the text Client</p>
        <p>Figure 4: The English interface example of crowdsourcing Task 3: fluency evaluation.</p>
        <p>Training tasks
11</p>
        <p>Do these sentences
WORK
Speed/quality balance</p>
        <p>Top 90% of Tolokers who
English (exam-based)
Toloka for mobile, Toloka
M0issing
gSrkeipatpeerdthtaansk65su; ites</p>
        <p>language, the Joint score was determined by multiplying the three individual scores (style transfer
accuracy, content similarity, and fluency) using the same formula as in the automatic evaluation.</p>
      </sec>
    </sec>
    <sec id="sec-7">
      <title>7. Participants</title>
      <p>y
Is the sentence intelligible and correct?</p>
      <p>YES, there are no mistakes or minor mistakes (punctuation, casing)</p>
      <p>We received 20 submissions for the development phase leaderboard and 31 submissions for the test phase
leaderboard; the final manually evaluated leaderboard was based on 17 submissions who confirmed
their participation in the competition. Here, we briefly describe the solutions of our final participants.
Each team is presented with its respective leaderboard name (in some cases, additionally, by a username
of the corresponding team member that did a submission) and approaches used in brackets.</p>
      <p>Team cake, Submission d1910 (few-shot Kimi.AI) [55] The participants achieved the resulting</p>
      <p>score with a few-shot LLM inference by using a two-stage process: first, 400 samples from EN and
RU provided datasets were used to be detoxified by a proprietary LLM—Kimi.AI [56] which is a large
language model chatbot developed by Moonshot AI, a Beijing-based startup. In the second step, the
participants employed the newly detoxified samples to construct a prompt where they were included as
examples of the desired behavior and the model Kimi.AI, thus, was prompted to perform detoxification
so that the sentence was detoxified the most appropriate way.</p>
    </sec>
    <sec id="sec-8">
      <title>Team SINAI, Submission estrella (Tree-of-Thought with GPT-3.5) [57]</title>
      <sec id="sec-8-1">
        <title>To get the results, Team</title>
        <p>SINAI employed the Tree-of-Thought prompting strategy based on thFeaOstpreenspAoIn’ssemsodel GMiPnTim-u3m.5t[im58e]p.er task suite — 15
Given a toxic sentence, the model was prompted to output three options of potential detoxified sentences.
Then the model was asked to decide in terms of ofensiveness, content, and fluency which oneIfoQnuutmutbhaeerlsioetfyfast task suite respon
control
responses
assignments Speed/quality balance</p>
        <p>Top
Price per task suite $0.044</p>
        <p>Fast responses
Optional settings</p>
        <p>Team MarSanAI, Submission maryam.najafi (Mistral-7b with PPO) [59] This team offered a
solution only for two languages: English and Russian. A reinforcement learning method was utilized
to fine-tune an LLM—Mistral-7b [60]—coupled with a Proximal Policy Optimization (PPO) [61] using
the implementation from HuggingFace TRL [62]; the reward was obtained using the provided toxicity
classifier.</p>
        <p>Team Linguistic_Hygienist, Submission gangopsa (T5 &amp; BART) [63] The solution consisted of
two components: i) the supervised solution for the English and Russian languages; ii) the unsupervised
solution for the other 7 languages. The supervised solution used T5 [64] and BART [65] as base
models; the exponentially weighted moving average and ROUGE scores were used as loss functions
for Russian and English, respectively. The unsupervised solution utilized hashing techniques, log odds
ratio, and grammatical rules to identify and conceal toxic words across other 7 languages; additionally,
it incorporated a mask prediction model to keep the original sentences’ meaning intact.</p>
        <p>Team VitalyProtasov (mT0-large) [66] In the proposed solution, the team used a text-to-text
model—mT0-large [67]—which was trained on different combinations of languages. In addition, before
training, certain filtering techniques were applied to the data.</p>
        <p>Team nikita.sushko (mT0-XL) [68] The participant used the text-to-text mT0-XL [67] model that
was fine-tuned in two stages. In the first stage, a model was fine-tuned on the parallel data of all
languages; this model was used to generate synthetic parallel data from non-parallel samples. The
resulting data was cleaned and filtered using a cosine distance between LaBSE embeddings and the
toxicity scores by the provided classification models followed by a modification with improved delete
approach. At the end, the synthetic and filtered “golden” data were merged into new training set to
fine-tune a new instance of the text-to-text multilingual model.</p>
        <p>Team SmurfCat, Submission adugeen (mT0-XL) [69] Multilingual model mT0-XL [67] was as
well used by this team. First, the model was fine-tuned for text generation using a combination of parallel
and translated datasets. The model was further aligned with the Odds Ratio Preference Optimization
(ORPO) [70]. During the inference stage, the best candidate generated by the model was chosen by
calculating scores from STA and SIM models.</p>
        <p>Team gleb.shnshn (zero-shot LLaMa-3) This solution was based on a modern open-source LLM—
LlaMa3-70B [71]. The model was prompted using the zero-shot prompting method for the detoxification
task.</p>
        <p>Team memu_pro_kotow, Submission SomethingAwful (few-shot LLaMa-3 &amp; mT0-XL) [72]
In this solution, “uncensored” LLaMa3 [71] was introduced and initialized for every target language
except Amharic. Using the recent alignment jailbreaking method by identifying “refusal” directions and
subtracting them from model weights [73], they used LLaMa3-70B to get predictions using a few-shot
prompting strategy. So, the model received 10 examples of detoxification via starting prompt. For the
Amharic language, the text-to-text mT0-XL [67] model was used: the model was fine-tuned on the
Amharic parallel dataset.</p>
        <p>
          Team Magnifying_Glass, Submission ZhongyuLuo (Translation &amp; BART-detox, ruT5-detox &amp;
Postprocessing) [74] The team used a combination of different methods and models depending on
the language. For the majority of languages, the participant used a text-to-text encoder-decoder NLLB
translation model [19] to translate data from various languages into English. Then, the translated data
was detoxified using the English BART-detox model [
          <xref ref-type="bibr" rid="ref13">13</xref>
          ]. After that, the resulting parallel synthetic
data was translated back into the original languages. For Russian, the specifically Russian
text-to-text model—ruT5-base-detox [22]—was employed for the detoxification. In the case of Chinese, the
participants, firstly, applied filtering of the training dataset, fine-tuned the pre-trained ruGPT3 [75]
model, and applied the Delete method.
Team nlp_enjoyers, Submission shredder67 (mT5) [76] The participant employed a text-to-text
model mT5 [51]. The provided multilingual parallel data from the development phase was used for
fine-tuning.
        </p>
        <p>Team NaiveNeuron, Submission erehulka (few-shot LLaMa-3) [77] The team used a text-to-text
Llama3 [71] which was prompted using a few-shot method.</p>
        <p>Team team0, Submission dkenco (few-shot Cotype-7b) In this case, the team put a stress solely
on the English and Russian languages. Two language-specific approaches were used based on Cotype-7b
model [78]. For English, there was employed a zero-shot prompting technique where the prompt
included brief instructions for the text detoxification task. For the Russian language, the team used
a few-shot approach: the system prompt included brief instructions for the task to be performed as
well as five randomly picked samples from the parallel development set. During inference, for both
languages, there were applied regular expressions intended as filters.</p>
        <p>Team NLPunks, Submission bmmikheev (few-shot LlaMa-3) This team used a text-to-text
Llama3-70B [71] with a few-shot prompting method. For English and Russian, the generated output
was evaluated manually. For other languages, GPT-3.5 [58] was used to evaluate outputs. For all
languages, the system prompt was formulated in English.</p>
        <p>Team Iron Autobots, Submission razvor (few-shot LlaMa-3) The participant as well used a
text-to-text Llama3-70b [71] with a few-shot prompting method.</p>
        <p>Team MBZUAI-UnbabelDetox, Submission mkrisnai (few-shot GPT-3.5) In this team, a
twostep prompting approach was utilized. At the first step, GPT-3.5 [ 58] was prompted with a few-shot
method to produce synthetic detoxification data. Then, the resulting data was employed in the prompt
to GPT-3.5 to perform detoxification.</p>
        <p>Team Yekaterina29 (mT5-XL) The participant fine-tuned mT5-XL instances [ 51] on the provided
development multilingual parallel dataset.</p>
        <p>Almost all of the participants used the current state-of-the-art Large Language Models (LLMs), among
which are GPT-3.5 [58] and LLaMa-3 [71] models. To enhance the model’s performance on the task
of detoxification, most participants used the few-shot prompting method. Among smaller models,
mT5 [51] and mT0 [67] were utilized: usually, these models were fine-tuned using ad hoc filtering and
data augmentation techniques, for instance, as RAG and backtranslation. Additionally, region-specific
LLMs were also employed—Cotype [78] and Kimi.AI [56].</p>
      </sec>
    </sec>
    <sec id="sec-9">
      <title>8. Results</title>
      <p>Here, we provide the final results of the final test phase of our task. The full detailed tables of results
per each language and per each metric can be found in Appendix A.</p>
      <sec id="sec-9-1">
        <title>8.1. Automatic Evaluation Leaderboard</title>
        <p>We received 20 submissions for the development phase automatic leaderboard and 31 submissions for
the test phase automatic leaderboard. Automatic evaluation leaderboards are publicly available online.20
The final leaderboard from the test automatic phase evaluation is presented in Table 3.
20https://codalab.lisn.upsaclay.fr/competitions/18243#results</p>
        <p>The leading solutions were consistent across most languages, except for Spanish, Chinese, and
Hindi. However, with the automatic evaluation leaderboard publicly available to all participants, some
teams focused on optimizing their models specifically for the evaluation metrics, leading to potential
overfitting.</p>
        <p>Most solutions surpassed the baseline for at least one language, and in some cases, participant
systems approached the performance of human references. However, except for Hindi, no participant
solution outperformed human references in the automatic evaluation across any language. Although
the automatic evaluation scores for human references across most languages hovered around a J score
of 0.7, the results for Chinese were notably poor, with the highest participant score being 0.178 and
the best human reference score at 0.201. These results lead to further investigations of the robustness
of the automatic evaluation metrics.</p>
        <p>The top three teams across the majority of the languages generally employed a similar strategy,
fine-tuning the mT0-XL text-to-text model. Team SmurfCat holds the best automatic evaluation
scores for all the languages, which was achieved by additionally fine-tuning mT0-XL with a recent ORPO
alignment method. The majority of the submissions were multilingual, designed to cover all languages
within a single model. These models demonstrated consistent score distributions across languages,
with notable declines in performance for Chinese and Hindi. An exception was user ansafronov, who
achieved the top score specifically for Chinese.</p>
      </sec>
      <sec id="sec-9-2">
        <title>8.2. Human Evaluation Leaderboard</title>
        <p>After participants confirmed their submissions via a form, we received 17 entries for the human
evaluation phase. This evaluation was conducted on a subsample of 100 test set items through crowdsourcing.
The results of the human evaluation, organized by team and language, are publicly available.21 The
final leaderboard based on human evaluation is presented in Table 4.</p>
        <p>The human evaluation leaderboard saw significant changes compared to the automatic evaluation
phase. Human references received high scores from the annotators, with J scores around 0.8 or
higher. However, not all teams surpassed the mT5 and Delete baselines. Interestingly, the Delete
baseline outperformed the mT5 text-to-text generation baseline in languages such as Arabic, Hindi,
Ukrainian, Russian, and Amharic. This indicates that not all multilingual models are equally proficient
in understanding and handling toxicity across different languages.</p>
        <p>In the human evaluation phase, participants’ solutions closely matched the human references, even
surpassing the provided references from parallel datasets in some languages. The top solution, after
manual evaluation, was presented by user SomethingAwful and was based on the “uncensored”
LLaMa3-70B language model. Interestingly, SomethingAwful’s solution did not achieve the highest
scores across all nine languages but excelled in Spanish, German, and Russian. The leader of the
automatic evaluation leaderboard, Team SmurfCat, secured second place. Participants nikita.sushko
and VitalyProtasov switched places in the manual leaderboard.</p>
        <p>Similar to the automatic leaderboard, human assessments revealed that certain models excelled in
specific languages. For instance, nikita.sushko and VitalyProtasov achieved top results in Arabic and
Hindi. Despite Team mkrisnai ranking 7th overall, their solution performed exceptionally well in
21https://github.com/textdetox/textdetox_clef_2024/tree/main/human_evaluation_results
English, Spanish, Russian, and Ukrainian. Additionally, Team Team cake secured the highest scores
specifically for English and Chinese.</p>
        <p>From the detailed results in Appendix A, it is evident that the solutions surpassed human references
in English, Spanish, and German, often achieving near-perfect fluency. However, this success does
not extend to other languages. These results highlight the impressive human-like text generation
capabilities of modern LLMs, though they still struggle with handling toxicity and maintaining consistent
controllable generation across languages. Future work should focus on developing more challenging
tasks, particularly in cross-lingual contexts.</p>
      </sec>
    </sec>
    <sec id="sec-10">
      <title>9. Conclusion</title>
      <p>In Multilingual Text Detoxification task at PAN 2024, participants were tasked with transforming text
style from toxic to non-toxic across nine languages: English, Spanish, German, Chinese, Arabic, Hindi,
Ukrainian, Russian, and Amharic. The task was divided into two phases: (i) development phase focused
on cross-lingual transfer approaches; (ii) test phase utilized parallel training data for all languages and
encouraged multilingual solutions. Participants’ submissions in both phases underwent evaluation
using a set of automatic metrics, followed by human evaluation of the test subset to determine the final
leaderboard rankings.</p>
      <p>Participants employed modern state-of-the-art Large Language Models either by prompting them in
few-shot formats or fine-tuning medium-sized models. For certain languages with sufficient training
data, these models approached or even exceeded human reference provided in the shared task. However,
this was primarily observed for resource-rich European languages. Opportunities for enhancement
remain significant for less resource-rich languages and those with limited data, highlighting the need
for further exploration in cross-lingual text detoxification and knowledge transfer.</p>
    </sec>
    <sec id="sec-11">
      <title>Acknowledgment</title>
      <p>We express our deepest gratitude to the Toloka platform for supporting our shared task. Crowdsourced data
collection and human evaluation were made possible through the provided research grant.
S. Muresan, P. Nakov, A. Villavicencio (Eds.), Proceedings of the 60th Annual Meeting of the Association for Computational
Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, Association for
Computational Linguistics, 2022, pp. 6804–6818. URL: https://doi.org/10.18653/v1/2022.acl-long.469.
doi:10.18653/V1/2022.ACL-LONG.469.
[27] Jigsaw, Toxic comment classification challenge,
https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge, 2017. Accessed: 2024-03-18.
[28] D. Dementieva, N. Babakov, A. Panchenko, Multiparadetox: Extending text detoxification with
parallel data to new languages, arXiv preprint arXiv:2404.02037 (2024).
[29] A. Belchikov, Russian language toxic comments,
https://www.kaggle.com/blackmoon/russian-language-toxic-comments, 2019. Accessed: 2023-12-14.
[30] A. Semiletov, Toxic Russian Comments: Labelled comments from the popular Russian social
network, https://www.kaggle.com/alexandersemiletov/toxic-russian-comments, 2020. Accessed:
2023-12-14.
[31] K. Bobrovnyk, Ukrainian obscene lexicon, https://github.com/saganoren/obscene-ukr, 2019.
Accessed: 2023-12-14.
[32] K. Bobrovnyk, Automated building and analysis of ukrainian twitter corpus for toxic text detection,
in: COLINS 2019. Volume II: Workshop, 2019. URL: https://ena.lpnu.ua:8443/server/api/core/
bitstreams/c4c645c1-f465-4895-98dd-765f862cf186/content.
[33] J. C. Pereira-Kohatsu, L. Q. Sánchez, F. Liberatore, M. Camacho-Collados, Detecting and monitoring
hate speech in twitter, Sensors 19 (2019) 4654. URL: https://doi.org/10.3390/s19214654. doi:10.
3390/S19214654.
[34] M. Taulé, M. Nofre, V. Bargiela, X. Bonet, Newscom-tox: a corpus of comments on news articles
annotated for toxicity in spanish, Language Resources and Evaluation (2024) 1–41.
[35] J. M. Pérez, D. A. Furman, L. Alonso Alemany, F. M. Luque, RoBERTuito: a pre-trained language
model for social media text in Spanish, in: Proceedings of the Thirteenth Language Resources and
Evaluation Conference, European Language Resources Association, Marseille, France, 2022, pp.
7235–7243. URL: https://aclanthology.org/2022.lrec-1.785.
[36] M. Wiegand, M. Siegel, J. Ruppenhofer, Overview of the GermEval 2018 Shared Task on the</p>
      <p>Identification of Offensive Language (2018).
[37] J. Risch, A. Stoll, L. Wilms, M. Wiegand, Overview of the GermEval 2021 Shared Task on the
Identification of Toxic, Engaging, and Fact-Claiming Comments, in: Proceedings of the GermEval 2021
Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments, Duesseldorf,
Germany, 2021, pp. 1–12.
[38] B. Ross, M. Rist, G. Carbonell, B. Cabrera, N. Kurowsky, M. Wojatzki, Measuring the Reliability of
Hate Speech Annotations: The Case of the European Refugee Crisis, in: M. Beißwenger, M.
Wojatzki, T. Zesch (Eds.), Proceedings of NLP4CMC III: 3rd Workshop on Natural Language Processing
for Computer-Mediated Communication, volume 17 of Bochumer Linguistische Arbeitsberichte,
Bochum, Germany, 2016, pp. 6–9.
[39] T. Mandl, S. Modha, P. Majumder, D. Patel, M. Dave, C. Mandlia, A. Patel, Overview of the
hasoc track at fire 2019: Hate speech and offensive content identification in indo-european
languages, in: Proceedings of the 11th Annual Meeting of the Forum for Information Retrieval
Evaluation, FIRE ’19, Association for Computing Machinery, New York, NY, USA, 2019, p. 14–17.</p>
      <p>URL: https://doi.org/10.1145/3368567.3368584. doi:10.1145/3368567.3368584.
[40] A. A. Ayele, S. Dinter, T. D. Belay, T. T. Asfaw, S. M. Yimam, C. Biemann, The 5Js in Ethiopia:
Amharic hate speech data annotation using Toloka Crowdsourcing Platform, in: Proceedings of the
4th International Conference on Information and Communication Technology for Development
for Africa (ICT4DA), Bahir Dar, Ethiopia, 2022, pp. 114–120. URL: https://ieeexplore.ieee.org/
document/9971189.
[41] H. Mulki, H. Haddad, C. B. Ali, H. Alshabani, L-hsab: A levantine twitter dataset for hate speech
and abusive language, in: Proceedings of the third workshop on abusive language online, 2019, pp.
111–118.
[42] H. Haddad, H. Mulki, A. Oueslati, T-hsab: A tunisian hate speech and abusive dataset, in:</p>
      <p>International conference on Arabic language processing, Springer, 2019, pp. 251–263.
[43] H. Mubarak, K. Darwish, W. Magdy, T. Elsayed, H. Al-Khalifa, Overview of osact4 arabic offensive
language detection shared task, in: Proceedings of the 4th Workshop on open-source arabic
corpora and processing tools, with a shared task on offensive language detection, 2020, pp. 48–52.
[44] H. Mulki, B. Ghanem, Let-mi: An Arabic Levantine Twitter dataset for misogynistic language,
in: Proceedings of the Sixth Arabic Natural Language Processing Workshop, Association for
Computational Linguistics, Kyiv, Ukraine (Virtual), 2021, pp. 154–163. URL: https://aclanthology.
org/2021.wanlp-1.16.
[45] J. Lu, B. Xu, X. Zhang, C. Min, L. Yang, H. Lin, Facilitating fine-grained detection of Chinese
toxic language: Hierarchical taxonomy, resources, and benchmarks, in: A. Rogers, J. Boyd-Graber,
N. Okazaki (Eds.), Proceedings of the 61st Annual Meeting of the Association for Computational
Linguistics, 2023, pp. 16235–16250. URL: https://aclanthology.org/2023.acl-long.898.
[46] D. Dementieva, S. Ustyantsev, D. Dale, O. Kozlova, N. Semenov, A. Panchenko, V. Logacheva,
Crowdsourcing of parallel corpora: the case of style transfer for detoxification, in: D. Ustalov,
F. Casati, A. Drutsa, I. Stelmakh, N. Pavlichenko, D. Baidakova (Eds.), Proceedings of the 2nd Crowd
Science Workshop: Trust, Ethics, and Excellence in Crowdsourced Data Management at Scale
colocated with 47th International Conference on Very Large Data Bases (VLDB 2021), Copenhagen,
Denmark, August 20, 2021, volume 2932 of CEUR Workshop Proceedings, CEUR-WS.org, 2021, pp.
35–49. URL: https://ceur-ws.org/Vol-2932/paper2.pdf.
[47] R. J. Gabriel, English full list of bad words and top swear words banned by google,
https://github.com/coffee-and-fun/google-profanity-words/blob/main/data/en.txt, 2023. Accessed:
2023-12-12.
[48] K. Bobrovnyk, The dictionary of ukrainian obscene words,
https://github.com/saganoren/obscene-ukr, 2019. Accessed: 2023-12-12.
[49] I. Shutterstock, List of dirty, naughty, obscene, and otherwise bad words,
https://github.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words, 2020.</p>
      <p>Accessed: 2023-12-12.
[50] A. Jiang, X. Yang, Y. Liu, A. Zubiaga, SWSR: A chinese dataset and lexicon for online sexism
detection, Online Soc. Networks Media 27 (2022) 100182. URL: https://doi.org/10.1016/j.osnem.
2021.100182. doi:10.1016/J.OSNEM.2021.100182.
[51] L. Xue, N. Constant, A. Roberts, M. Kale, R. Al-Rfou, A. Siddhant, A. Barua, C. Raffel, mT5: A
massively multilingual pre-trained text-to-text transformer, in: K. Toutanova, A. Rumshisky,
L. Zettlemoyer, D. Hakkani-Tür, I. Beltagy, S. Bethard, R. Cotterell, T. Chakraborty, Y. Zhou
(Eds.), Proceedings of the 2021 Conference of the North American Chapter of the Association for
Computational Linguistics: Human Language Technologies, NAACL-HLT 2021, Online, June 6-11,
2021, Association for Computational Linguistics, 2021, pp. 483–498. URL: https://doi.org/10.18653/
v1/2021.naacl-main.41. doi:10.18653/V1/2021.NAACL-MAIN.41.
[52] A. Conneau, K. Khandelwal, N. Goyal, V. Chaudhary, G. Wenzek, F. Guzmán, E. Grave, M. Ott,
L. Zettlemoyer, V. Stoyanov, Unsupervised cross-lingual representation learning at scale, in:
D. Jurafsky, J. Chai, N. Schluter, J. R. Tetreault (Eds.), Proceedings of the 58th Annual Meeting of
the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, Association for
Computational Linguistics, 2020, pp. 8440–8451. URL: https://doi.org/10.18653/v1/2020.acl-main.
747. doi:10.18653/V1/2020.ACL-MAIN.747.
[53] F. Feng, Y. Yang, D. Cer, N. Arivazhagan, W. Wang, Language-agnostic BERT sentence embedding,
in: S. Muresan, P. Nakov, A. Villavicencio (Eds.), Proceedings of the 60th Annual Meeting of the
Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland,
May 22-27, 2022, Association for Computational Linguistics, 2022, pp. 878–891. URL: https://doi.
org/10.18653/v1/2022.acl-long.62. doi:10.18653/V1/2022.ACL-LONG.62.
[54] M. Post, A call for clarity in reporting BLEU scores, in: O. Bojar, R. Chatterjee, C. Federmann,
M. Fishel, Y. Graham, B. Haddow, M. Huck, A. Jimeno-Yepes, P. Koehn, C. Monz, M. Negri,
A. Névéol, M. L. Neves, M. Post, L. Specia, M. Turchi, K. Verspoor (Eds.), Proceedings of the
Third Conference on Machine Translation: Research Papers, WMT 2018, Belgium, Brussels,
October 31 - November 1, 2018, Association for Computational Linguistics, 2018, pp. 186–191. URL:
https://doi.org/10.18653/v1/w18-6319. doi:10.18653/V1/W18-6319.
[55] J. Peng, Z. Han, H. Zhang, J. Ye, C. Liu, B. Liu, M. Guo, H. Chen, Z. Lin, Y. Tang, A Multilingual
Text Detoxification Method Based on Few-shot Learning and CO-STAR Framework, in: G. Faggioli,
N. Ferro, P. Galuščáková, A. G. S. de Herrera (Eds.), Working Notes of CLEF 2024 - Conference and
Labs of the Evaluation Forum, CEUR-WS.org, 2024.
[56] M. AI, Kimi chatbot, 2024. URL: https://kimi.moonshot.cn, accessed: 2024-05-31.
[57] M. Vallecillo-Rodríguez, A. M. Martín-Valdivia, SINAI at PAN 2024 TextDetox: Application of
Tree of Thought Strategy in Large Language Models for Multilingual Text Detoxification, in:
G. Faggioli, N. Ferro, P. Galuščáková, A. G. S. de Herrera (Eds.), Working Notes of CLEF 2024
Conference and Labs of the Evaluation Forum, CEUR-WS.org, 2024.
[58] OpenAI, Chatgpt: Optimizing language models for dialogue, 2022. URL: https://openai.com/blog/
chatgpt, accessed: 2024-05-31.
[59] M. Najafi, E. Tavan, S. Colreavy, Marsan at PAN 2024 TextDetox: ToxiCleanse RL and Paving
the Way for Toxicity-Free Online Discourse, in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S.
de Herrera (Eds.), Working Notes of CLEF 2024 - Conference and Labs of the Evaluation Forum,
CEUR-WS.org, 2024.
[60] A. Q. Jiang, A. Sablayrolles, A. Mensch, C. Bamford, D. S. Chaplot, D. de Las Casas, F. Bressand,
G. Lengyel, G. Lample, L. Saulnier, L. R. Lavaud, M. Lachaux, P. Stock, T. L. Scao, T. Lavril, T. Wang,
T. Lacroix, W. E. Sayed, Mistral 7b, CoRR abs/2310.06825 (2023). URL: https://doi.org/10.48550/
arXiv.2310.06825. doi:10.48550/ARXIV.2310.06825. arXiv:2310.06825.
[61] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, O. Klimov, Proximal policy optimization algorithms,</p>
      <p>CoRR abs/1707.06347 (2017). URL: http://arxiv.org/abs/1707.06347. arXiv:1707.06347.
[62] L. von Werra, Y. Belkada, L. Tunstall, E. Beeching, T. Thrush, N. Lambert, S. Huang, Trl: Transformer
reinforcement learning, https://github.com/huggingface/trl, 2020.
[63] S. Gangopadhyay, M. Khan, H. Jabeen, HybridDetox: Combining Supervised and Unsupervised
Methods for Effective Multilingual Text Detoxification, in: G. Faggioli, N. Ferro, P. Galuščáková,
A. G. S. de Herrera (Eds.), Working Notes of CLEF 2024 - Conference and Labs of the Evaluation
Forum, CEUR-WS.org, 2024.
[64] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li, P. J. Liu, Exploring
the limits of transfer learning with a unified text-to-text transformer, J. Mach. Learn. Res. 21 (2020)
140:1–140:67. URL: http://jmlr.org/papers/v21/20-074.html.
[65] M. Lewis, Y. Liu, N. Goyal, M. Ghazvininejad, A. Mohamed, O. Levy, V. Stoyanov, L. Zettlemoyer,
BART: denoising sequence-to-sequence pre-training for natural language generation, translation,
and comprehension, in: D. Jurafsky, J. Chai, N. Schluter, J. R. Tetreault (Eds.), Proceedings of
the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online,
July 5-10, 2020, Association for Computational Linguistics, 2020, pp. 7871–7880. URL: https:
//doi.org/10.18653/v1/2020.acl-main.703. doi:10.18653/V1/2020.ACL-MAIN.703.
[66] V. Protasov, PAN 2024 Multilingual TextDetox: Exploring Cross-lingual Transfer in Case of Large
Language Models, in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S. de Herrera (Eds.), Working
Notes of CLEF 2024 - Conference and Labs of the Evaluation Forum, CEUR-WS.org, 2024.
[67] N. Muennighoff, T. Wang, L. Sutawika, A. Roberts, S. Biderman, T. L. Scao, M. S. Bari, S. Shen,
Z. X. Yong, H. Schoelkopf, X. Tang, D. Radev, A. F. Aji, K. Almubarak, S. Albanie, Z. Alyafeai,
A. Webson, E. Raff, C. Raffel, Crosslingual generalization through multitask finetuning, in:
A. Rogers, J. L. Boyd-Graber, N. Okazaki (Eds.), Proceedings of the 61st Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto,
Canada, July 9-14, 2023, Association for Computational Linguistics, 2023, pp. 15991–16111. URL:
https://doi.org/10.18653/v1/2023.acl-long.891. doi:10.18653/V1/2023.ACL-LONG.891.
[68] N. Sushko, PAN 2024 Multilingual TextDetox: Exploring Different Regimes For Synthetic Data
Training For Multilingual Text Detoxification, in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S.
de Herrera (Eds.), Working Notes of CLEF 2024 - Conference and Labs of the Evaluation Forum,
CEUR-WS.org, 2024.
[69] E. Rykov, K. Zaytsev, I. Anisimov, A. Voronin, SmurfCat at PAN TextDetox 2024: Alignment of
Multilingual Transformers for Text Detoxification, in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S.
de Herrera (Eds.), Working Notes of CLEF 2024 - Conference and Labs of the Evaluation Forum,
CEUR-WS.org, 2024.
[70] J. Hong, N. Lee, J. Thorne, ORPO: monolithic preference optimization without reference model,
CoRR abs/2403.07691 (2024). URL: https://doi.org/10.48550/arXiv.2403.07691. doi:10.48550/
ARXIV.2403.07691. arXiv:2403.07691.
[71] AI@Meta, Llama 3 model card (2024). URL: https://github.com/meta-llama/llama3/blob/main/</p>
      <p>MODEL_CARD.md.
[72] S. Pletenev, Memu_pro_kotow at PAN 2024 TextDetox: Uncensored Llama3 Helps to Censor
Better, in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S. de Herrera (Eds.), Working Notes of CLEF
2024 - Conference and Labs of the Evaluation Forum, CEUR-WS.org, 2024.
[73] A. Zou, L. Phan, S. Chen, J. Campbell, P. Guo, R. Ren, A. Pan, X. Yin, M. Mazeika, A.
Dombrowski, S. Goel, N. Li, M. J. Byun, Z. Wang, A. Mallen, S. Basart, S. Koyejo, D. Song, M.
Fredrikson, J. Z. Kolter, D. Hendrycks, Representation engineering: A top-down approach to AI
transparency, CoRR abs/2310.01405 (2023). URL: https://doi.org/10.48550/arXiv.2310.01405.
doi:10.48550/ARXIV.2310.01405. arXiv:2310.01405.
[74] Z. Luo, M. Luo, A. Wang, Multilingual Text Detoxification Using Google Cloud Translation and
Post-Processing, in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S. de Herrera (Eds.), Working
Notes of CLEF 2024 - Conference and Labs of the Evaluation Forum, CEUR-WS.org, 2024.
[75] A. Forever, rugpt3, 2022. URL: https://huggingface.co/ai-forever, accessed: 2024-05-31.
[76] V. Zinkovich, S. Karpukhin, N. Kurdiukov, P. Tikhomirov, nlp_enjoyers at Multilingual Textual
Detoxification (CLEF-2024), in: G. Faggioli, N. Ferro, P. Galuščáková, A. G. S. de Herrera (Eds.),
Working Notes of CLEF 2024 - Conference and Labs of the Evaluation Forum, CEUR-WS.org, 2024.
[77] E. Řehulka, M. Šuppa, RAG Meets Detox: Enhancing Text Detoxification Using Open-Source Large
Language Models with Retrieval Augmented Generation, in: G. Faggioli, N. Ferro, P. Galuščáková,
A. G. S. de Herrera (Eds.), Working Notes of CLEF 2024 - Conference and Labs of the Evaluation
Forum, CEUR-WS.org, 2024.
[78] MTS.AI, Cotype: Generative ai solutions, 2022. URL: https://mts.ai, accessed: 2024-05-31.</p>
    </sec>
    <sec id="sec-12">
      <title>A. Automatic and Manual Evaluation Results per Language</title>
      <p>Here, we provide the extended results—from both automatic and human evaluation setups—based on
three evaluation parameters for all languages: English (Table 5), Spanish (Table 6), German (Table 7),
Chinese (Table 8), Arabic (Table 9), Hindi (Table 10), Ukrainian (Table 11), Russian (Table 12), and
Amharic (Table 13). In every table, the baselines are highlighted with gray; Human References are
highlighted with green; the ordering is by J score from the Human Evaluation results. The
automatic evaluation is based on the full test set of 600 samples per language; human evaluation was
performed on a 100-sample subset of the test set per language.</p>
      <p>J</p>
      <p>J</p>
      <p>J</p>
      <p>J*</p>
    </sec>
  </body>
  <back>
    <ref-list>
      <ref id="ref1">
        <mixed-citation>
          [1]
          <string-name>
            <given-names>Z. R.</given-names>
            <surname>Shi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Wang</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Fang</surname>
          </string-name>
          ,
          <article-title>Artificial intelligence for social good: A survey</article-title>
          , CoRR abs/
          <year>2001</year>
          .
          <year>01818</year>
          (
          <year>2020</year>
          ). URL: http://arxiv.org/abs/
          <year>2001</year>
          .
          <year>01818</year>
          . arXiv:
          <year>2001</year>
          .
          <year>01818</year>
          .
        </mixed-citation>
      </ref>
      <ref id="ref2">
        <mixed-citation>
          [2]
          <string-name>
            <given-names>Y.</given-names>
            <surname>Yao</surname>
          </string-name>
          ,
          <string-name>
            <given-names>J.</given-names>
            <surname>Duan</surname>
          </string-name>
          ,
          <string-name>
            <given-names>K.</given-names>
            <surname>Xu</surname>
          </string-name>
          ,
          <string-name>
            <given-names>Y.</given-names>
            <surname>Cai</surname>
          </string-name>
          , E. Sun,
          <string-name>
            <surname>Y. Zhang,</surname>
          </string-name>
          <article-title>A survey on large language model (LLM) security and privacy: The good, the bad, and the ugly</article-title>
          ,
          <source>CoRR abs/2312</source>
          .
          <year>02003</year>
          (
          <year>2023</year>
          ). URL: https://doi.org/10.48550/arXiv.2312.
          <year>02003</year>
          . doi:
          <volume>10</volume>
          .48550/ARXIV.2312.
          <year>02003</year>
          . arXiv:
          <fpage>2312</fpage>
          .
          <year>02003</year>
          .
        </mixed-citation>
      </ref>
      <ref id="ref3">
        <mixed-citation>
          [3]
          <string-name>
            <given-names>J.</given-names>
            <surname>Cobbe</surname>
          </string-name>
          ,
          <article-title>Algorithmic censorship by social platforms: Power and resistance</article-title>
          ,
          <source>Philosophy &amp; Technology</source>
          <volume>34</volume>
          (
          <year>2021</year>
          )
          <fpage>739</fpage>
          -
          <lpage>766</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref4">
        <mixed-citation>
          [4]
          <string-name>
            <given-names>B.</given-names>
            <surname>Mathew</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Saha</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S. M.</given-names>
            <surname>Yimam</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Biemann</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Goyal</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Mukherjee</surname>
          </string-name>
          ,
          <article-title>Hatexplain: A benchmark dataset for explainable hate speech detection</article-title>
          ,
          <source>in: Thirty-Fifth AAAI Conference on Artificial Intelligence</source>
          ,
          <source>AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI</source>
          <year>2021</year>
          ,
          <source>The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI</source>
          <year>2021</year>
          ,
          <string-name>
            <given-names>Virtual</given-names>
            <surname>Event</surname>
          </string-name>
          ,
          <source>February 2-9</source>
          ,
          <year>2021</year>
          , AAAI Press,
          <year>2021</year>
          , pp.
          <fpage>14867</fpage>
          -
          <lpage>14875</lpage>
          . URL: https://doi.org/10.1609/aaai.v35i17.17745. doi:
          <volume>10</volume>
          .1609/AAAI.V35I17.17745.
        </mixed-citation>
      </ref>
      <ref id="ref5">
        <mixed-citation>
          [5]
          <string-name>
            <given-names>J. M.</given-names>
            <surname>Molero</surname>
          </string-name>
          ,
          <string-name>
            <given-names>J.</given-names>
            <surname>Pérez-Martín</surname>
          </string-name>
          , Á. Rodrigo,
          <string-name>
            <given-names>A.</given-names>
            <surname>Peñas</surname>
          </string-name>
          ,
          <article-title>Offensive language detection in spanish social media: Testing from bag-of-words to transformers models</article-title>
          ,
          <source>IEEE Access 11</source>
          (
          <year>2023</year>
          )
          <fpage>95639</fpage>
          -
          <lpage>95652</lpage>
          . URL: https://doi.org/10.1109/ACCESS.
          <year>2023</year>
          .
          <volume>3310244</volume>
          . doi:
          <volume>10</volume>
          .1109/ACCESS.
          <year>2023</year>
          .
          <volume>3310244</volume>
          .
        </mixed-citation>
      </ref>
      <ref id="ref6">
        <mixed-citation>
          [6]
          <string-name>
            <given-names>A. A.</given-names>
            <surname>Ayele</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S. M.</given-names>
            <surname>Yimam</surname>
          </string-name>
          ,
          <string-name>
            <given-names>T. D.</given-names>
            <surname>Belay</surname>
          </string-name>
          ,
          <string-name>
            <given-names>T.</given-names>
            <surname>Asfaw</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Biemann</surname>
          </string-name>
          ,
          <article-title>Exploring Amharic hate speech data collection and classification approaches</article-title>
          , in: R. Mitkov, G. Angelova (Eds.),
          <source>Proceedings of the 14th International Conference on Recent Advances in Natural Language Processing</source>
          , INCOMA Ltd.,
          <string-name>
            <surname>Shoumen</surname>
          </string-name>
          , Bulgaria, Varna, Bulgaria,
          <year>2023</year>
          , pp.
          <fpage>49</fpage>
          -
          <lpage>59</lpage>
          . URL: https://aclanthology.org/
          <year>2023</year>
          .ranlp-
          <volume>1</volume>
          .6.
        </mixed-citation>
      </ref>
      <ref id="ref7">
        <mixed-citation>
          [7]
          <string-name>
            <given-names>A.</given-names>
            <surname>Bohra</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Vijay</surname>
          </string-name>
          ,
          <string-name>
            <given-names>V.</given-names>
            <surname>Singh</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S. S.</given-names>
            <surname>Akhtar</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Shrivastava</surname>
          </string-name>
          ,
          <article-title>A dataset of Hindi-English codemixed social media text for hate speech detection</article-title>
          ,
          <source>in: Proceedings of the Second Workshop on Computational Modeling of People's Opinions</source>
          , Personality, and Emotions in Social Media, Association for Computational Linguistics, New Orleans, Louisiana, USA,
          <year>2018</year>
          , pp.
          <fpage>36</fpage>
          -
          <lpage>41</lpage>
          . URL: https://aclanthology.org/W18-1105. doi:
          <volume>10</volume>
          .18653/v1/
          <fpage>W18</fpage>
          -1105.
        </mixed-citation>
      </ref>
      <ref id="ref8">
        <mixed-citation>
          [8]
          <string-name>
            <given-names>M. R.</given-names>
            <surname>Costa-jussà</surname>
          </string-name>
          , M. C. Meglioli,
          <string-name>
            <given-names>P.</given-names>
            <surname>Andrews</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Dale</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Hansanti</surname>
          </string-name>
          ,
          <string-name>
            <given-names>E.</given-names>
            <surname>Kalbassi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Mourachko</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Ropers</surname>
          </string-name>
          ,
          <string-name>
            <given-names>C.</given-names>
            <surname>Wood</surname>
          </string-name>
          , Mutox:
          <article-title>Universal multilingual audio-based toxicity dataset and zero-shot detector</article-title>
          ,
          <source>CoRR abs/2401</source>
          .05060 (
          <year>2024</year>
          ). URL: https://doi.org/10.48550/arXiv.2401.05060. doi:
          <volume>10</volume>
          . 48550/ARXIV.2401.05060. arXiv:
          <volume>2401</volume>
          .
          <fpage>05060</fpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref9">
        <mixed-citation>
          [9]
          <string-name>
            <given-names>E.</given-names>
            <surname>Kulenović</surname>
          </string-name>
          ,
          <article-title>Should democracies ban hate speech? hate speech laws and counterspeech</article-title>
          ,
          <source>Ethical Theory and Moral Practice</source>
          <volume>26</volume>
          (
          <year>2023</year>
          )
          <fpage>511</fpage>
          -
          <lpage>532</lpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref10">
        <mixed-citation>
          [10]
          <string-name>
            <given-names>J.</given-names>
            <surname>Li</surname>
          </string-name>
          ,
          <string-name>
            <given-names>R.</given-names>
            <surname>Jia</surname>
          </string-name>
          ,
          <string-name>
            <given-names>H.</given-names>
            <surname>He</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Liang</surname>
          </string-name>
          , Delete, retrieve, generate
          <article-title>: a simple approach to sentiment and style transfer</article-title>
          , in: M. A.
          <string-name>
            <surname>Walker</surname>
            ,
            <given-names>H.</given-names>
          </string-name>
          <string-name>
            <surname>Ji</surname>
            ,
            <given-names>A</given-names>
          </string-name>
          . Stent (Eds.),
          <source>Proceedings of the</source>
          <year>2018</year>
          <article-title>Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies</article-title>
          , NAACL-HLT
          <year>2018</year>
          , New Orleans, Louisiana, USA, June 1-6,
          <year>2018</year>
          , Volume
          <volume>1</volume>
          (
          <string-name>
            <surname>Long</surname>
            <given-names>Papers)</given-names>
          </string-name>
          ,
          <source>Association for Computational Linguistics</source>
          ,
          <year>2018</year>
          , pp.
          <fpage>1865</fpage>
          -
          <lpage>1874</lpage>
          . URL: https://doi.org/10.18653/v1/n18-
          <fpage>1169</fpage>
          . doi:
          <volume>10</volume>
          .18653/V1/N18-1169.
        </mixed-citation>
      </ref>
      <ref id="ref11">
        <mixed-citation>
          [11]
          <string-name>
            <surname>C.</surname>
          </string-name>
          <article-title>Nogueira dos Santos, I. Melnyk, I. Padhi, Fighting offensive language on social media with unsupervised text style transfer</article-title>
          ,
          <source>in: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume</source>
          <volume>2</volume>
          :
          <string-name>
            <surname>Short</surname>
            <given-names>Papers)</given-names>
          </string-name>
          ,
          <source>Association for Computational Linguistics</source>
          , Melbourne, Australia,
          <year>2018</year>
          , pp.
          <fpage>189</fpage>
          -
          <lpage>194</lpage>
          . URL: https://aclanthology.org/P18-2031. doi:
          <volume>10</volume>
          .18653/ v1/
          <fpage>P18</fpage>
          -2031.
        </mixed-citation>
      </ref>
      <ref id="ref12">
        <mixed-citation>
          [12]
          <string-name>
            <given-names>D.</given-names>
            <surname>Dale</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Voronov</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Dementieva</surname>
          </string-name>
          ,
          <string-name>
            <given-names>V.</given-names>
            <surname>Logacheva</surname>
          </string-name>
          ,
          <string-name>
            <given-names>O.</given-names>
            <surname>Kozlova</surname>
          </string-name>
          ,
          <string-name>
            <given-names>N.</given-names>
            <surname>Semenov</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Panchenko</surname>
          </string-name>
          ,
          <article-title>Text detoxification using large pre-trained neural models</article-title>
          ,
          <source>in: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</source>
          , Association for Computational Linguistics, Online and
          <string-name>
            <given-names>Punta</given-names>
            <surname>Cana</surname>
          </string-name>
          , Dominican Republic,
          <year>2021</year>
          , pp.
          <fpage>7979</fpage>
          -
          <lpage>7996</lpage>
          . URL: https://aclanthology.org/
          <year>2021</year>
          .emnlp-main.
          <volume>629</volume>
          . doi:
          <volume>10</volume>
          .18653/v1/
          <year>2021</year>
          .emnlp-main.
          <volume>629</volume>
          .
        </mixed-citation>
      </ref>
      <ref id="ref13">
        <mixed-citation>
          [13]
          <string-name>
            <given-names>V.</given-names>
            <surname>Logacheva</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Dementieva</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Ustyantsev</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Moskovskiy</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Dale</surname>
          </string-name>
          , I. Krotova,
          <string-name>
            <given-names>N.</given-names>
            <surname>Semenov</surname>
          </string-name>
          ,
          <string-name>
            <surname>A</surname>
          </string-name>
          . Panchenko,
          <article-title>ParaDetox: Detoxification with parallel data, in: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics</article-title>
          (Volume
          <volume>1</volume>
          :
          <string-name>
            <surname>Long</surname>
            <given-names>Papers)</given-names>
          </string-name>
          ,
          <source>Association for Computational Linguistics</source>
          , Dublin, Ireland,
          <year>2022</year>
          , pp.
          <fpage>6804</fpage>
          -
          <lpage>6818</lpage>
          . URL: https://aclanthology.org/
          <year>2022</year>
          .
          <article-title>acl-long</article-title>
          .
          <volume>469</volume>
          . doi:
          <volume>10</volume>
          .18653/v1/
          <year>2022</year>
          .
          <article-title>acl-long</article-title>
          .
          <volume>469</volume>
          .
        </mixed-citation>
      </ref>
      <ref id="ref14">
        <mixed-citation>
          [14]
          <string-name>
            <given-names>K.</given-names>
            <surname>Atwell</surname>
          </string-name>
          ,
          <string-name>
            <given-names>S.</given-names>
            <surname>Hassan</surname>
          </string-name>
          ,
          <string-name>
            <surname>M.</surname>
          </string-name>
          <article-title>Alikhani, APPDIA: A discourse-aware transformer-based style transfer model for offensive social media conversations</article-title>
          , in: N.
          <string-name>
            <surname>Calzolari</surname>
            ,
            <given-names>C.</given-names>
          </string-name>
          <string-name>
            <surname>Huang</surname>
            ,
            <given-names>H.</given-names>
          </string-name>
          <string-name>
            <surname>Kim</surname>
            ,
            <given-names>J.</given-names>
          </string-name>
          <string-name>
            <surname>Pustejovsky</surname>
            ,
            <given-names>L.</given-names>
          </string-name>
          <string-name>
            <surname>Wanner</surname>
            ,
            <given-names>K.</given-names>
          </string-name>
          <string-name>
            <surname>Choi</surname>
            ,
            <given-names>P.</given-names>
          </string-name>
          <string-name>
            <surname>Ryu</surname>
            ,
            <given-names>H.</given-names>
          </string-name>
          <string-name>
            <surname>Chen</surname>
            ,
            <given-names>L.</given-names>
          </string-name>
          <string-name>
            <surname>Donatelli</surname>
            ,
            <given-names>H.</given-names>
          </string-name>
          <string-name>
            <surname>Ji</surname>
            ,
            <given-names>S.</given-names>
          </string-name>
          <string-name>
            <surname>Kurohashi</surname>
            ,
            <given-names>P.</given-names>
          </string-name>
          <string-name>
            <surname>Paggio</surname>
            ,
            <given-names>N.</given-names>
          </string-name>
          <string-name>
            <surname>Xue</surname>
            ,
            <given-names>S.</given-names>
          </string-name>
          <string-name>
            <surname>Kim</surname>
            ,
            <given-names>Y.</given-names>
          </string-name>
          <string-name>
            <surname>Hahm</surname>
            ,
            <given-names>Z.</given-names>
          </string-name>
          <string-name>
            <surname>He</surname>
            ,
            <given-names>T. K.</given-names>
          </string-name>
          <string-name>
            <surname>Lee</surname>
            ,
            <given-names>E.</given-names>
          </string-name>
          <string-name>
            <surname>Santus</surname>
            ,
            <given-names>F.</given-names>
          </string-name>
          <string-name>
            <surname>Bond</surname>
          </string-name>
          , S. Na (Eds.),
          <source>Proceedings of the 29th International Conference on Computational Linguistics</source>
          ,
          <string-name>
            <surname>COLING</surname>
          </string-name>
          <year>2022</year>
          , Gyeongju, Republic of Korea,
          <source>October 12-17</source>
          ,
          <year>2022</year>
          ,
          <source>International Committee on Computational Linguistics</source>
          ,
          <year>2022</year>
          , pp.
          <fpage>6063</fpage>
          -
          <lpage>6074</lpage>
          . URL: https://aclanthology.org/
          <year>2022</year>
          .coling-
          <volume>1</volume>
          .
          <fpage>530</fpage>
          .
        </mixed-citation>
      </ref>
      <ref id="ref15">
        <mixed-citation>
          [15]
          <string-name>
            <given-names>D.</given-names>
            <surname>Dementieva</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Moskovskiy</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Dale</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Panchenko</surname>
          </string-name>
          ,
          <article-title>Exploring methods for cross-lingual text style transfer: The case of text detoxification</article-title>
          , in: J. C. Park,
          <string-name>
            <given-names>Y.</given-names>
            <surname>Arase</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Hu</surname>
          </string-name>
          ,
          <string-name>
            <given-names>W.</given-names>
            <surname>Lu</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Wijaya</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Purwarianti</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A. A.</given-names>
            <surname>Krisnadhi</surname>
          </string-name>
          (Eds.),
          <source>Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd</source>
          <article-title>Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics</article-title>
          ,
          <source>IJCNLP 2023 -Volume</source>
          <volume>1</volume>
          :
          <string-name>
            <given-names>Long</given-names>
            <surname>Papers</surname>
          </string-name>
          , Nusa Dua, Bali, November 1 -
          <issue>4</issue>
          ,
          <year>2023</year>
          , Association for Computational Linguistics,
          <year>2023</year>
          , pp.
          <fpage>1083</fpage>
          -
          <lpage>1101</lpage>
          . URL: https://doi.org/10.18653/v1/
          <year>2023</year>
          .ijcnlp-main.
          <volume>70</volume>
          . doi:
          <volume>10</volume>
          .18653/V1/
          <year>2023</year>
          .IJCNLP-MAIN.
          <year>70</year>
          .
        </mixed-citation>
      </ref>
      <ref id="ref16">
        <mixed-citation>
          [16]
          <string-name>
            <given-names>J.</given-names>
            <surname>Bevendorff</surname>
          </string-name>
          ,
          <string-name>
            <given-names>X. B.</given-names>
            <surname>Casals</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Chulvi</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Dementieva</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Elnagar</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Freitag</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Fröbe</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Korenčić</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Mayerl</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Mukherjee</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Panchenko</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Potthast</surname>
          </string-name>
          ,
          <string-name>
            <given-names>F.</given-names>
            <surname>Rangel</surname>
          </string-name>
          ,
          <string-name>
            <given-names>P.</given-names>
            <surname>Rosso</surname>
          </string-name>
          ,
          <string-name>
            <given-names>A.</given-names>
            <surname>Smirnova</surname>
          </string-name>
          ,
          <string-name>
            <given-names>E.</given-names>
            <surname>Stamatatos</surname>
          </string-name>
          ,
          <string-name>
            <given-names>B.</given-names>
            <surname>Stein</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Taulé</surname>
          </string-name>
          ,
          <string-name>
            <given-names>D.</given-names>
            <surname>Ustalov</surname>
          </string-name>
          ,
          <string-name>
            <given-names>M.</given-names>
            <surname>Wiegmann</surname>
          </string-name>
          , E. Zangerle,
          <article-title>Overview of PAN 2024: Multi-Author Writing Style Analysis</article-title>
          ,
          <source>Multilingual Text Detoxification</source>
          , Oppositional Thinking
        </mixed-citation>
      </ref>
    </ref-list>
  </back>
</article>