Datasets:

Sub-tasks:
fact-checking
Languages:
Arabic
ArXiv:
License:
mkon committed on
Commit
e6e6f3d
·
1 Parent(s): 0d60e6a

rename dataloader

Browse files
Files changed (2) hide show
  1. ans_stance.py +0 -101
  2. dataset_infos.json +0 -1
ans_stance.py DELETED
@@ -1,101 +0,0 @@
1
- # Copyright 2022 Mads Kongsbak and Leon Derczynski
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """DataLoader for ANS, an Arabic News Stance corpus"""
15
-
16
-
17
import csv
import json
import os

import datasets

# BibTeX entry for the paper introducing the ANS corpus (Khouja, FEVER 2020).
_CITATION = """\
@inproceedings{,
    title = "Stance Prediction and Claim Verification: An {A}rabic Perspective",
    author = "Khouja, Jude",
    booktitle = "Proceedings of the Third Workshop on Fact Extraction and {VER}ification ({FEVER})",
    year = "2020",
    address = "Seattle, USA",
    publisher = "Association for Computational Linguistics",
}
"""

# Human-readable summary surfaced in the dataset card / DatasetInfo.
_DESCRIPTION = """\
The dataset is a collection of news titles in arabic along with paraphrased and corrupted titles. The stance prediction version is a 3-class classification task. Data contains three columns: s1, s2, stance.
"""

# No dedicated project homepage for this loader.
_HOMEPAGE = ""

# SPDX-style license identifier for the data.
_LICENSE = "apache-2.0"
41
-
42
class ANSStanceConfig(datasets.BuilderConfig):
    """BuilderConfig for the ANS stance-prediction dataset."""

    def __init__(self, **kwargs):
        """Forward every keyword argument unchanged to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
47
class ANSStance(datasets.GeneratorBasedBuilder):
    """ANS dataset made in triples of (s1, s2, stance)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ANSStanceConfig(name="stance", version=VERSION, description=""),
    ]

    def _info(self):
        """Return the DatasetInfo describing the four features of each example."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "s1": datasets.Value("string"),
                "s2": datasets.Value("string"),
                "stance": datasets.features.ClassLabel(
                    names=["disagree", "agree", "other"]
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the three CSV files shipped alongside this loader, one per split."""
        train_path = dl_manager.download_and_extract("ans_train.csv")
        valid_path = dl_manager.download_and_extract("ans_dev.csv")
        test_path = dl_manager.download_and_extract("ans_test.csv")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": valid_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one CSV split.

        The previous implementation popped and re-inserted each column
        (three no-op statements) and yielded the raw ``DictReader`` row,
        so any unexpected extra CSV column would leak into the example and
        mismatch the features declared in ``_info``. Build the example
        explicitly so the yielded keys always match the declared schema.

        Args:
            filepath: Local path to the split's CSV file (columns s1, s2, stance).
            split: Split name ("train"/"validation"/"test"); unused, kept for
                signature compatibility with ``_split_generators``.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",")
            for guid, row in enumerate(reader):
                yield guid, {
                    "id": str(guid),
                    "s1": row["s1"],
                    "s2": row["s2"],
                    "stance": row["stance"],
                }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"stance": {"description": "The dataset is a collection of news titles in arabic along with paraphrased and corrupted titles. The stance prediction version is a 3-class classification task. Data contains three columns: s1, s2, stance:\n", "citation": "@inproceedings{,\n title = \"Stance Prediction and Claim Verification: An {A}rabic Perspective\", \n author = \"Khouja, Jude\",\n booktitle = \"Proceedings of the Third Workshop on Fact Extraction and {VER}ification ({FEVER})\",\n year = \"2020\",\n address = \"Seattle, USA\",\n publisher = \"Association for Computational Linguistics\",\n}\n", "homepage": "", "license": "apache-2.0", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "s1": {"dtype": "string", "id": null, "_type": "Value"}, "s2": {"dtype": "string", "id": null, "_type": "Value"}, "stance": {"num_classes": 3, "names": ["disagree", "agree", "other"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ans_stance", "config_name": "stance", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 531232, "num_examples": 2652, "dataset_name": "ans_stance"}, "validation": {"name": "validation", "num_bytes": 153125, "num_examples": 755, "dataset_name": "ans_stance"}, "test": {"name": "test", "num_bytes": 76099, "num_examples": 379, "dataset_name": "ans_stance"}}, "download_checksums": {"ans_train.csv": {"num_bytes": 507122, "checksum": "b5486ca4d5f434aaab5025953aeccebffc47181e7918695456fbc26af5e5c7d4"}, "ans_dev.csv": {"num_bytes": 146237, "checksum": "ca7fca0ecaaf5b248d002b104bee54e2d56c32aab9f5fbb7d3ee71d4d88a7621"}, "ans_test.csv": {"num_bytes": 72660, "checksum": "371622ff09260064297197a439fd62ac91c932d8975f3333815746db57f82920"}}, "download_size": 726019, "post_processing_size": null, "dataset_size": 760456, "size_in_bytes": 1486475}}