Torben Peters committed 26c5476 · 1 Parent(s): eb237f2
Files changed (4)
  1. BioMap.py +150 -0
  2. README.md +0 -53
  3. data/test.parquet +0 -3
  4. gitattributes +0 -55
BioMap.py ADDED
@@ -0,0 +1,150 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class NewDataset(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version=VERSION, description="This part of my dataset covers a first domain"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         features = datasets.Features({
+             'input': datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value('float32')))),
+             'label': datasets.Value('float32')
+         })
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # This builder wraps an existing Hugging Face dataset, so there is nothing for
+         # dl_manager to download: the source dataset is loaded once here and its samples
+         # are re-emitted in _generate_examples.
+         self.ds = datasets.load_dataset("prs-eth/AGBD")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # The split name must match the one used by the wrapped dataset
+                 gen_kwargs={"split": "validation"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"split": "test"},
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, split):
+         # Enumerate the wrapped dataset's split so that each example gets a unique integer key.
+         for i, d in enumerate(self.ds[split]):
+             yield i, {
+                 'input': d["input"],
+                 'label': d["label"]
+             }
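For reference, here is a minimal sketch of how the new builder could be exercised from a local checkout. The `trust_remote_code` flag and the split handling are assumptions: script-based builders need an opt-in on recent `datasets` releases, and the validation split must exist under the name used by the wrapped prs-eth/AGBD dataset.

```python
# Minimal usage sketch (assumption: a local checkout containing BioMap.py and a
# `datasets` release that still supports script-based builders).
from datasets import load_dataset

# Build the dataset from the script added in this commit.
ds = load_dataset("./BioMap.py", trust_remote_code=True)

# Each example has a nested float32 'input' array and a scalar float32 'label'.
example = ds["train"][0]
print(type(example["input"]), example["label"])
```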
README.md DELETED
@@ -1,53 +0,0 @@
-
- ---
- license: cc-by-nc-4.0
- dataset_info:
-   features:
-   - name: input
-     sequence:
-       sequence:
-         sequence: float32
-   - name: label
-     dtype: float32
- configs:
- - config_name: default
-   data_files:
-   - split: train
-     path: data/test*
-   - split: validation
-     path: data/test*
-   - split: test
-     path: data/test*
- ---
-
-
-
- # 🌲 AGBD: A Global-scale Biomass Dataset 🌳
-
- Authors: Ghjulia Sialelli ([gsialelli@ethz.ch](mailto:gsialelli@ethz.ch)), Torben Peters, Jan Wegner, Konrad Schindler
-
- ## 15x15 Version
-
- This dataset is the smaller 15x15 version. If you want access to the bigger 25x25 patches, go to [huggingface.co/datasets/prs-eth/AGBD](https://huggingface.co/datasets/prs-eth/AGBD).
-
- ## Dataset Description
-
- Each dataset sample is a pair consisting of a pre-cropped, pre-normalized image and its corresponding biomass label.
- The full project page, including links to the preprocessed, uncropped data, can be found at [github.com/ghjuliasialelli/AGBD/](https://github.com/ghjuliasialelli/AGBD/).
-
- ### Image Details
-
- Each image contains 24 channels, organized as follows:
-
- - **Spectral Bands**: B01, B02, B03, B04, B05, B06, B07, B08, B8A, B09, B11, B12
- - **Geographical Coordinates**: lat_cos, lat_sin, lon_cos, lon_sin
- - **ALOS PALSAR Bands**: alos_hh, alos_hv
- - **Canopy Heights**: ch, ch_std
- - **Land Cover Information**: lc_cos, lc_sin, lc_prob
- - **Digital Elevation Model**: dem
-
- ### Channel Structure
-
- ```plaintext
- (B01 B02 B03 B04 B05 B06 B07 B08 B8A B09 B11 B12) | (lat_cos, lat_sin, lon_cos, lon_sin) | (alos_hh, alos_hv) | (ch, ch_std) | (lc_cos, lc_sin, lc_prob) | dem
- ```
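To make that channel layout concrete, the sketch below shows how a single sample could be split back into those groups. It uses the prs-eth/AGBD repository named above and assumes a channels-first array; neither the array orientation nor this repo's own id is stated in the README, so treat both as assumptions.

```python
import numpy as np
from datasets import load_dataset

# Assumption: the 25x25 sibling dataset named above; substitute this repo's id
# to work with the 15x15 patches instead.
ds = load_dataset("prs-eth/AGBD", split="train", streaming=True)
sample = next(iter(ds))

x = np.asarray(sample["input"], dtype=np.float32)  # assumed layout: (channels, height, width)
label = sample["label"]                             # biomass label, float32

# Channel groups, in the order given in "Channel Structure" above.
s2_bands   = x[0:12]    # B01 ... B12
coords     = x[12:16]   # lat_cos, lat_sin, lon_cos, lon_sin
alos       = x[16:18]   # alos_hh, alos_hv
canopy     = x[18:20]   # ch, ch_std
land_cover = x[20:23]   # lc_cos, lc_sin, lc_prob
dem        = x[23]      # digital elevation model
```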
data/test.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ba5596325f4a716a4f5b748757ae4816b7d687cdd10b7841775f1ce03280ca16
- size 187056189
gitattributes DELETED
@@ -1,55 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text