---
annotations_creators:
  - other
language_creators:
  - other
language:
  - sk
license:
  - other
  - cc-by-sa-4.0
  - cc-by-sa-3.0
  - mit
multilinguality:
  - monolingual
size_categories:
  - 10K<n<100K
source_datasets:
  - original
task_categories:
  - question-answering
  - text-classification
  - token-classification
task_ids:
  - extractive-qa
  - named-entity-recognition
  - acceptability-classification
  - natural-language-inference
  - semantic-similarity-scoring
  - sentiment-classification
  - text-scoring
paperswithcode_id: sklep
pretty_name: skLEP (General Language Understanding Evaluation benchmark for Slovak)
tags:
  - qa-nli
  - coreference-nli
  - paraphrase-identification

config_names:
  - hate-speech
  - sentiment-analysis
  - ner-wikigoldsk
  - ner-uner
  - pos
  - question-answering
  - rte
  - nli
  - sts
dataset_info:
  - config_name: hate-speech
    features:
      - name: text
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              "0": negative
              "1": positive
      - name: id
        dtype: int32
    splits:
      - name: train
        num_bytes: 1393604
        num_examples: 10531
      - name: test
        num_bytes: 150919
        num_examples: 1319
      - name: validation
        num_bytes: 160199
        num_examples: 1339
  - config_name: sentiment-analysis
    features:
      - name: text
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              "0": negative
              "1": positive
      - name: id
        dtype: int32
    splits:
      - name: train
        num_bytes: 387491
        num_examples: 3560
      - name: test
        num_bytes: 117983
        num_examples: 1042
      - name: validation
        num_bytes: 117983
        num_examples: 522
    download_size: 326394
    dataset_size: 605704
  - config_name: ner-wikigoldsk
    features:
      - name: sentence
        dtype: string
      - name: tokens
        sequence: string
      - name: ner_tags
        sequence:
          class_label:
            names:
              0: "O"
              1: "B-LOC"
              2: "I-LOC"
              3: "B-ORG"
              4: "I-ORG"
              5: "B-PER"
              6: "I-PER"
              7: "B-MISC"
              8: "I-MISC"
      - name: ner_tags_text
        sequence: string
    splits:
      - name: train
        num_bytes: 1885504
        num_examples: 4687
      - name: validation
        num_bytes: 267514
        num_examples: 669
      - name: test
        num_bytes: 532642
        num_examples: 1340
  - config_name: ner-uner
    features:
      - name: sentence
        dtype: string
      - name: tokens
        sequence: string
      - name: ner_tags
        sequence:
          class_label:
            names:
              0: "O"
              1: "B-LOC"
              2: "I-LOC"
              3: "B-ORG"
              4: "I-ORG"
              5: "B-PER"
              6: "I-PER"
              7: "B-MISC"
              8: "I-MISC"
      - name: ner_tags_text
        sequence: string
    splits:
      - name: train
        num_bytes: 1786598
        num_examples: 8483
      - name: validation
        num_bytes: 289084
        num_examples: 1060
      - name: test
        num_bytes: 289026
        num_examples: 1061
  - config_name: pos
    features:
      - name: id
        dtype: string
      - name: tokens
        sequence: string
      - name: pos_tags
        sequence: string
    splits:
      - name: train
        num_bytes: 1786598
        num_examples: 8483
      - name: validation
        num_bytes: 289084
        num_examples: 1060
      - name: test
        num_bytes: 289026
        num_examples: 1061
  - config_name: question-answering
    features:
      - name: id
        dtype: string
      - name: title
        dtype: string
      - name: context
        dtype: string
      - name: question
        dtype: string
      - name: answers
        struct:
          - name: text
            sequence: string
          - name: answer_start
            sequence: int32
    splits:
      - name: train
        num_bytes: 98742578
        num_examples: 71999
      - name: validation
        num_bytes: 13100270
        num_examples: 9583
      - name: test
        num_bytes: 12992195
        num_examples: 9583
  - config_name: rte
    features:
      - name: text1
        dtype: string
      - name: text2
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              "0": not entailment
              "1": entailment
      - name: idx
        dtype: int32
      - name: label_text
        dtype: string
      - name: text1_orig
        dtype: string
      - name: text2_orig
        dtype: string
    splits:
      - name: train
        num_bytes: 2134837
        num_examples: 2490
      - name: validation
        num_bytes: 229013
        num_examples: 277
      - name: test
        num_bytes: 1255739
        num_examples: 1660
  - config_name: nli
    features:
      - name: premise
        dtype: string
      - name: hypothesis
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              "0": neutral
              "1": entailment
              "2": contradiction
      - name: premise_orig
        dtype: string
      - name: hypothesis_orig
        dtype: string
    splits:
      - name: train
        num_bytes: 142579745
        num_examples: 392702
      - name: validation
        num_bytes: 1138053
        num_examples: 2490
      - name: test
        num_bytes: 2294209
        num_examples: 5004
  - config_name: sts
    features:
      - name: sentence1
        dtype: string
      - name: sentence2
        dtype: string
      - name: similarity_score
        dtype: float64
      - name: sentence1_orig
        dtype: string
      - name: sentence2_orig
        dtype: string
    splits:
      - name: train
        num_examples: 5604
        num_bytes: 2184171
      - name: validation
        num_examples: 1481
        num_bytes: 617309
      - name: test
        num_examples: 1352
        num_bytes: 493116

configs:
  - config_name: hate-speech
    data_files:
      - split: test
        path: hate-speech/test.json
      - split: train
        path: hate-speech/train.json
      - split: validation
        path: hate-speech/validation.json
  - config_name: sentiment-analysis
    data_files:
      - split: test
        path: sentiment-analysis/test.json
      - split: train
        path: sentiment-analysis/train.json
      - split: validation
        path: sentiment-analysis/validation.json
  - config_name: ner-wikigoldsk
    data_files:
      - split: test
        path: ner-wikigoldsk/test.jsonl
      - split: train
        path: ner-wikigoldsk/train.jsonl
      - split: validation
        path: ner-wikigoldsk/dev.jsonl
  - config_name: ner-uner
    data_files:
      - split: test
        path: ner-uner/test.jsonl
      - split: train
        path: ner-uner/train.jsonl
      - split: validation
        path: ner-uner/dev.jsonl
  - config_name: pos
    data_files:
      - split: test
        path: pos/test.jsonl
      - split: validation
        path: pos/dev.jsonl
      - split: train
        path: pos/train.jsonl
  - config_name: question-answering
    data_files:
      - split: test
        path: question-answering/test.json
      - split: validation
        path: question-answering/validation.json
      - split: train
        path: question-answering/train.json
  - config_name: rte
    data_files:
      - split: test
        path: rte/test.json
      - split: validation
        path: rte/validation.json
      - split: train
        path: rte/train.json
  - config_name: nli
    data_files:
      - split: test
        path: nli/test.json
      - split: validation
        path: nli/validation.json
      - split: train
        path: nli/train.json
  - config_name: sts
    data_files:
      - split: test
        path: sts/test.json
      - split: validation
        path: sts/validation.json
      - split: train
        path: sts/train.json
---

# Dataset Card for skLEP

## Dataset Description

skLEP (General Language Understanding Evaluation benchmark for Slovak) is the first comprehensive benchmark specifically designed for evaluating Slovak natural language understanding (NLU) models. The benchmark encompasses nine diverse tasks that span token-level, sentence-pair, and document-level challenges, thereby offering a thorough assessment of model capabilities.

To create this benchmark, we curated new, original datasets tailored for Slovak and meticulously translated established English NLU resources with native speaker post-editing to ensure high-quality evaluation.

### Dataset Summary

skLEP, the General Language Understanding Evaluation benchmark for Slovak, is a collection of resources for training, evaluating, and analyzing natural language understanding systems.

### Supported Tasks and Leaderboards

skLEP includes nine tasks across three categories:

**Token-Level Tasks:**

- Part-of-Speech (POS) Tagging using Universal Dependencies
- Named Entity Recognition using Universal NER (UNER)
- Named Entity Recognition using WikiGoldSK (WGSK)

**Sentence-Pair Tasks:**

- Recognizing Textual Entailment (RTE)
- Natural Language Inference (NLI)
- Semantic Textual Similarity (STS)

**Document-Level Tasks:**

- Hate Speech Classification (HS)
- Sentiment Analysis (SA)
- Question Answering (QA) based on SK-QuAD

A public leaderboard is available at <https://github.com/slovak-nlp/sklep>.

### Languages

The language data in skLEP is in Slovak (BCP-47 `sk`).

## Dataset Structure

### Data Instances

The benchmark contains the following data splits:

- **hate-speech**: 10,531 train, 1,339 validation, 1,319 test examples
- **sentiment-analysis**: 3,560 train, 522 validation, 1,042 test examples
- **ner-wikigoldsk**: 4,687 train, 669 validation, 1,340 test examples
- **ner-uner**: 8,483 train, 1,060 validation, 1,061 test examples
- **pos**: 8,483 train, 1,060 validation, 1,061 test examples
- **question-answering**: 71,999 train, 9,583 validation, 9,583 test examples
- **rte**: 2,490 train, 277 validation, 1,660 test examples
- **nli**: 392,702 train, 2,490 validation, 5,004 test examples
- **sts**: 5,604 train, 1,481 validation, 1,352 test examples
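
Each task ships as its own configuration, so the tasks can be loaded independently with the Hugging Face `datasets` library. A minimal sketch, assuming the benchmark is hosted on the Hub under an ID such as `slovak-nlp/sklep` (the exact repository ID is an assumption here):

```python
from datasets import load_dataset

# Each skLEP task is a separate configuration; config names match the
# metadata above (e.g. "hate-speech", "nli", "pos", "question-answering").
nli = load_dataset("slovak-nlp/sklep", "nli")  # hub ID assumed, may differ

print(nli)              # DatasetDict with train/validation/test splits
print(nli["train"][0])  # one premise/hypothesis/label example
```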

### Data Fields

Each task has specific data fields:

**Token-level tasks**: the NER configs (UNER, WGSK) provide `sentence`, `tokens`, `ner_tags`, and `ner_tags_text`; the POS config (UD) provides `id`, `tokens`, and `pos_tags` (see the decoding sketch at the end of this section)

**Sentence-pair tasks**:

- RTE: `text1`, `text2`, `label`, `idx`, `label_text`, plus the pre-translation originals `text1_orig` and `text2_orig`
- NLI: `premise`, `hypothesis`, `label`, plus `premise_orig` and `hypothesis_orig`
- STS: `sentence1`, `sentence2`, `similarity_score`, plus `sentence1_orig` and `sentence2_orig`

**Document-level tasks**:

- Hate Speech/Sentiment: `text`, `label`, `id`
- Question Answering: `id`, `title`, `context`, `question`, `answers`
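
The `label` and `ner_tags` columns are declared as `ClassLabel` features in the metadata above, so integer ids can be mapped back to their tag names. A short sketch, reusing the hypothetical hub ID:

```python
from datasets import load_dataset

ner = load_dataset("slovak-nlp/sklep", "ner-wikigoldsk")  # hub ID assumed

# ner_tags is a Sequence of ClassLabel; the inner feature holds the
# id-to-name mapping ("O", "B-LOC", "I-LOC", ...).
tag_feature = ner["train"].features["ner_tags"].feature
example = ner["train"][0]
print(list(zip(example["tokens"],
               (tag_feature.int2str(t) for t in example["ner_tags"]))))
```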

### Data Splits

All tasks follow a standard train/validation/test split structure. Some datasets (HS and QA) originally only had train/test splits, so validation sets were created by sampling from the training data to match the test set size.
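
The exact sampling procedure and seed are not documented in this card, but the effect can be reproduced with a sketch along these lines (hub ID and seed are assumptions):

```python
from datasets import load_dataset

qa = load_dataset("slovak-nlp/sklep", "question-answering")  # hub ID assumed

# Hold out as many training examples as there are test examples, which
# is how the HS and QA validation sets were sized per the note above.
split = qa["train"].train_test_split(test_size=len(qa["test"]), seed=42)
train, validation = split["train"], split["test"]
```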

## Dataset Creation

### Curation Rationale

skLEP was created to address the lack of a comprehensive benchmark for Slovak natural language understanding. While similar benchmarks exist for other Slavic languages (Bulgarian, Polish, Russian, Slovene), no equivalent existed for Slovak despite the emergence of several Slovak-specific large language models.

The benchmark was designed to provide a principled tool for evaluating language understanding capabilities across diverse tasks, enabling systematic comparison of Slovak-specific, multilingual, and English pre-trained models.

### Source Data

#### Initial Data Collection and Normalization

Data was collected from multiple sources:

- **Existing Slovak datasets**: Universal Dependencies, Universal NER, WikiGoldSK, Slovak Hate Speech Database, Reviews3, SK-QuAD
- **Translated datasets**: RTE, NLI (XNLI), and STS were translated from English using machine translation services followed by native speaker post-editing

During preprocessing, duplicates were removed from XNLI and STS datasets. For STS, sentence pairs with identical text but non-perfect similarity scores were eliminated as translation artifacts.
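
A sketch of that STS artifact filter, assuming the usual 0–5 STS similarity scale (the scale itself is not stated in this card):

```python
from datasets import load_dataset

sts = load_dataset("slovak-nlp/sklep", "sts")  # hub ID assumed

def is_clean(example, max_score=5.0):  # 0-5 STS scale is an assumption
    # Identical sentence pairs with a non-perfect similarity score are
    # treated as translation artifacts and dropped.
    identical = example["sentence1"] == example["sentence2"]
    return not identical or example["similarity_score"] == max_score

filtered = sts["train"].filter(is_clean)
```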

#### Who are the source language producers?

The source language producers include:

- Native Slovak speakers for original Slovak datasets
- Professional translators and native Slovak post-editors for translated datasets
- Wikipedia contributors for WikiGoldSK and SK-QuAD
- Social media users for hate speech dataset
- Customer reviewers for sentiment analysis dataset

### Annotations

#### Annotation process

Annotation processes varied by dataset:

- **Token-level tasks**: Following Universal Dependencies and Universal NER annotation guidelines
- **WikiGoldSK**: Manual annotation following BSNLP-2017 guidelines with CoNLL-2003 NER tagset
- **Hate Speech**: Expert annotation with quality filtering (removing annotators with >90% uniform responses or <70% agreement; see the sketch after this list)
- **Sentiment Analysis**: Manual labeling by two annotators reaching consensus
- **SK-QuAD**: Created by 150+ volunteers and 9 part-time annotators, validated by 5 paid reviewers
- **Translated datasets**: Professional translation followed by native speaker post-editing
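
A sketch of the hate-speech annotator filter using the thresholds quoted above; how uniformity and agreement were actually computed is not specified, so the details below are assumptions:

```python
from collections import Counter

def keep_annotator(labels, pairwise_agreements,
                   max_uniform=0.90, min_agreement=0.70):
    """Return True if an annotator passes both quality checks (thresholds
    from the card; the exact computation is assumed)."""
    # Share of responses carrying the annotator's most frequent label.
    uniform_rate = Counter(labels).most_common(1)[0][1] / len(labels)
    # Mean agreement with co-annotators on items labeled in common.
    agreement = sum(pairwise_agreements) / len(pairwise_agreements)
    return uniform_rate <= max_uniform and agreement >= min_agreement
```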

#### Who are the annotators?

Annotators include:

- Expert linguists and NLP researchers for token-level tasks
- Native Slovak speakers for post-editing translated content
- Domain experts for hate speech classification
- Trained volunteers and professional annotators for SK-QuAD
- Customer service experts for sentiment analysis

### Personal and Sensitive Information

The hate speech dataset contains social media posts that may include offensive language by design. Personal information was removed during preprocessing. Other datasets (Wikipedia-based, customer reviews, translated content) have minimal personal information risk.

## Considerations for Using the Data

### Social Impact of Dataset

skLEP enables systematic evaluation and improvement of Slovak NLP models, supporting the development of better language technology for Slovak speakers. The hate speech detection task specifically contributes to online safety tools for Slovak social media platforms.

### Discussion of Biases

Potential biases include:

- **Domain bias**: Wikipedia-heavy content in several tasks may not represent colloquial Slovak
- **Translation bias**: Translated tasks may carry over English linguistic patterns
- **Social media bias**: Hate speech dataset reflects specific online communities
- **Geographic bias**: May favor standard Slovak over regional variants

### Other Known Limitations

- Some test sets differ from English counterparts due to translation and re-labeling requirements
- Dataset sizes vary significantly across tasks
- Limited coverage of specialized domains outside Wikipedia and social media
- Validation sets for some tasks were created by splitting training data rather than independent collection

## Additional Information

### Dataset Curators

skLEP was curated by researchers from:

- Comenius University in Bratislava, Slovakia
- Technical University of Košice, Slovakia
- Kempelen Institute of Intelligent Technologies, Bratislava, Slovakia
- Cisco Systems

Lead contact: Marek Šuppa (<marek@suppa.sk>)

### Licensing Information

The primary skLEP tasks are built on and derived from existing datasets. We refer users to the original licenses accompanying each dataset.

### Citation Information

If you use skLEP, please cite the following paper:

```bibtex
@inproceedings{suppa-etal-2025-sklep,
    title = "sk{LEP}: A {S}lovak General Language Understanding Benchmark",
    author = "Suppa, Marek  and
      Ridzik, Andrej  and
      Hl{\'a}dek, Daniel  and
      Jav{\r{u}}rek, Tom{\'a}{\v{s}}  and
      Ondrejov{\'a}, Vikt{\'o}ria  and
      S{\'a}sikov{\'a}, Krist{\'i}na  and
      Tamajka, Martin  and
      Simko, Marian",
    editor = "Che, Wanxiang  and
      Nabende, Joyce  and
      Shutova, Ekaterina  and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.1371/",
    pages = "26716--26743",
    ISBN = "979-8-89176-256-5",
    abstract = "In this work, we introduce skLEP, the first comprehensive benchmark specifically designed for evaluating Slovak natural language understanding (NLU) models. We have compiled skLEP to encompass nine diverse tasks that span token-level, sentence-pair, and document-level challenges, thereby offering a thorough assessment of model capabilities. To create this benchmark, we curated new, original datasets tailored for Slovak and meticulously translated established English NLU resources. Within this paper, we also present the first systematic and extensive evaluation of a wide array of Slovak-specific, multilingual, and English pre-trained language models using the skLEP tasks. Finally, we also release the complete benchmark data, an open-source toolkit facilitating both fine-tuning and evaluation of models, and a public leaderboard at \url{https://github.com/slovak-nlp/sklep} in the hopes of fostering reproducibility and drive future research in Slovak NLU."
}
```

### Contributions

Contributions to skLEP include:

- First comprehensive Slovak NLU benchmark with 9 diverse tasks
- High-quality translations with native speaker post-editing
- Extensive baseline evaluations across multiple model types
- Open-source toolkit and standardized leaderboard
- Rigorous evaluation methodology with hyperparameter optimization

Future contributions and improvements are welcome through the project repository.