|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""String Operations Dataset for fast model development""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import re |
|
|
|
import datasets |
|
import gzip |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {String Operations Dataset: A small set of string manipulation tasks for fast model development}, |
|
author={Michael Granitzer}, |
|
year={2023} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
Minimal dataset for intended for LM development and testing using python string operations. The dataset is created by running different one line python string operations on random strings The idea is, that transformer implementation can learn the string operations and that this task is a good proxy tasks for other transformer operations on real languages and real tasks. Consequently, the data set is small and can be used in the development process without large scale infrastructures. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "Apache 2.0 License" |
|
|
|
|
|
|
|
|
|
_URLS = { |
|
"small": ["https://github.com/mgrani/StOp-Dataset/raw/main/small/stop_10_train.json.gz", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/stop_10_test.json.gz", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/stop_20_train.json.gz", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/stop_20_test.json.gz", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/stop_15_train.json.gz", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/stop_15_test.json.gz", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/about.json", |
|
"https://github.com/mgrani/StOp-Dataset/raw/main/small/Readme.md"] |
|
} |
|
|
|
|
|
|
|
class StopDataset(datasets.GeneratorBasedBuilder):
    """Builder for the String Operations (StOp) dataset.

    Each example records a one-line python string operation: the ``input``
    string, the produced ``output``, the python ``code`` that was run, the
    result variable name (``res_var``) and the ``operation`` label.

    The config name may carry optional filters in square brackets, e.g.
    ``small[l=10,o=slice]``: ``l=<int>`` restricts the string length
    (selecting the matching ``stop_<len>_*`` files) and ``o=<regex>``
    restricts the operation label.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="small", version=VERSION, description="Small string operations dataset with string slices only"),
        datasets.BuilderConfig(name="small[filter]", version=VERSION, description="Small string operations dataset with string slices only. [] allows to specify a comma separated list of filters on the length (i.e. l=X) and operations (i.e. o=y)"),
    ]

    DEFAULT_CONFIG_NAME = "small"

    def _info(self):
        """Return dataset metadata and parse filters embedded in the config name."""
        if self.config.name.startswith("small"):
            features = datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "output": datasets.Value("string"),
                    "code": datasets.Value("string"),
                    "res_var": datasets.Value("string"),
                    "operation": datasets.Value("string")
                }
            )
            # Everything after "small" is an optional "[k=v,...]" filter list.
            self._init_filters(self.config.name[len("small"):].strip("[]").split(","))
            # Normalize back to the base name so the _URLS lookup succeeds.
            self.config.name = self.config.name[:len("small")]
        else:
            raise NotImplementedError()
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _init_filters(self, filters):
        """Parse filter expressions into instance state.

        Args:
            filters: iterable of "k=v" strings; "l=<int>" adds a length
                filter, "o=<regex>" adds an operation-name filter. Empty
                strings (from splitting an empty spec) are ignored.
        """
        self.filter_operations = []
        self.filter_len = []
        for spec in filters:  # renamed from `filter` to avoid shadowing the builtin
            if spec == "":
                continue
            key, value = spec.split("=")
            if key == "l":
                self.filter_len.append(int(value))
            elif key == "o":
                self.filter_operations.append(re.compile(value))

    def _split_generators(self, dl_manager):
        """Download the (possibly length-filtered) files and declare splits."""
        urls = _URLS[self.config.name]
        if len(self.filter_len) > 0:
            # Keep only the train/test files whose length tag was requested
            # (this also drops about.json / Readme.md, which are unused below).
            urls = [url for url in urls
                    if any(f"stop_{length}_t" in url for length in self.filter_len)]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": [data_dir[i] for i, n in enumerate(urls) if "_train.json" in n],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": [data_dir[i] for i, n in enumerate(urls) if "_test.json" in n],
                    "split": "test",
                },
            ),
        ]

    def _match_operations_filter(self, operation):
        """Return True when `operation` passes the configured operation filters.

        Bug fixes vs. the original:
        - `re.Pattern` has no `matches` method; use `match`.
        - `self.filter_operations` is initialized to a list (never None), so
          the old `is not None` guard made an *empty* filter list reject
          every example; no filters now means everything passes.
        """
        if not self.filter_operations:
            return True
        return any(pattern.match(operation) is not None
                   for pattern in self.filter_operations)

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from each JSON file in `filepath`.

        Args:
            filepath: list of local paths to extracted JSON split files.
            split: split name ("train"/"test"); unused, kept for the
                SplitGenerator contract.
        """
        count = 0
        for filename in filepath:
            with open(filename, encoding="utf-8") as f:
                dataset = json.load(f)
            for data in dataset:
                # Skip examples whose operation does NOT pass the filters.
                # (The original inverted this test, which would have dropped
                # exactly the requested operations.)
                if not self._match_operations_filter(data["operation"]):
                    continue
                # Prefer the example's own id; otherwise use a running counter.
                key = data["id"] if "id" in data else count
                count = count + 1
                yield key, {
                    "input": data["input"],
                    "output": data["output"],
                    "code": data["code"],
                    "res_var": data["res_var"],
                    "operation": data["operation"]
                }
            # NOTE(review): a dead template branch yielding "sentence"/
            # "option2" keys (not in the declared features, unreachable
            # because _info rejects non-"small" configs) was removed.
|
|