PeterTor committed · verified
Commit 5b30348 · 1 parent: 6cc76ec

Update BioMap.py

Files changed (1):
  1. BioMap.py +73 -94
BioMap.py CHANGED
@@ -14,14 +14,12 @@
 # TODO: Address all TODOs and remove all explanatory comments
 """TODO: Add a description here."""
 
-
 import csv
 import json
 import os
 import numpy as np
 import datasets
-
-
+from datasets import Value
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -45,123 +43,104 @@ _HOMEPAGE = ""
45
  # TODO: Add the licence for the dataset here if you can find it
46
  _LICENSE = ""
47
 
48
- # TODO: Add link to the official dataset URLs here
49
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
50
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
51
- _URLS = {
52
- "default": "https://huggingface.co/great-new-dataset-first_domain.zip",
53
- }
54
-
55
- norm_values = {'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787, 'p99': 0.1946}, 'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925, 'p99': 0.2216}, 'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035, 'p99': 0.2556}, 'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023, 'p99': 0.2816}, 'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178, 'p99': 0.319}, 'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1633, 'p99': 0.519}, 'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1776, 'p99': 0.6076}, 'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691, 'p99': 0.646}, 'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.1871, 'p99': 0.6386}, 'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2124, 'p99': 0.6241}, 'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334, 'p99': 0.4827}, 'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.115, 'p99': 0.3872}}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
58
  class NewDataset(datasets.GeneratorBasedBuilder):
59
- """TODO: Short description of my dataset."""
 
 
 
 
 
 
60
 
61
  VERSION = datasets.Version("1.1.0")
62
 
63
- # This is an example of a dataset with multiple configurations.
64
- # If you don't want/need to define several sub-sets in your dataset,
65
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
66
-
67
- # If you need to make complex sub-parts in the datasets with configurable options
68
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
69
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
70
-
71
- # You will be able to load one or the other configurations in the following list with
72
- # data = datasets.load_dataset('my_dataset', 'first_domain')
73
- # data = datasets.load_dataset('my_dataset', 'second_domain')
74
-
75
  BUILDER_CONFIGS = [
76
- datasets.BuilderConfig(name="default", version=VERSION, description="This part of my dataset covers a first domain"),
77
- datasets.BuilderConfig(name="unnormalized", version=VERSION, description="This part of my dataset covers a first domain"),
78
  ]
79
 
80
- DEFAULT_CONFIG_NAME = "default" # It's not mandatory to have a default configuration. Just use one if it make sense.
81
-
 
 
 
82
 
83
  def _info(self):
84
- self.ds = datasets.load_dataset("prs-eth/AGBD",streaming=True)
85
- features = datasets.Features({
86
  'input': datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value('float32')))),
87
- 'label': datasets.Value('float32')
88
- })
 
 
 
89
 
90
  return datasets.DatasetInfo(
91
- # This is the description that will appear on the datasets page.
92
  description=_DESCRIPTION,
93
- # This defines the different columns of the dataset and their types
94
- features=features, # Here we define them above because they are different between the two configurations
95
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
96
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
97
- # supervised_keys=("sentence", "label"),
98
- # Homepage of the dataset for documentation
99
  homepage=_HOMEPAGE,
100
- # License for the dataset if available
101
  license=_LICENSE,
102
- # Citation for the dataset
103
  citation=_CITATION,
104
  )
105
 
106
- def denormalize_s2(self,patch):
107
- r,g,b = patch[3],patch[2],patch[1]
108
  res = []
109
- for band, band_value in zip(['B04','B03','B02'],[r,g,b]) :
110
- p1, p99 = norm_values[band]['p1'], norm_values[band]['p99']
111
- band_value = (p99 - p1) * band_value + p1
112
- res.append(band_value)
113
- patch[3],patch[2],patch[1] = res
114
  return patch
115
-
116
- def _split_generators(self, dl_manager):
117
- # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
118
- # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
119
-
120
- # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
121
- # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
122
- # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
123
 
 
 
124
  return [
125
- datasets.SplitGenerator(
126
- name=datasets.Split.TRAIN,
127
- # These kwargs will be passed to _generate_examples
128
- gen_kwargs={
129
-
130
- "split": "train",
131
- },
132
- ),
133
- datasets.SplitGenerator(
134
- name=datasets.Split.VALIDATION,
135
- # These kwargs will be passed to _generate_examples
136
- gen_kwargs={
137
-
138
- "split": "val",
139
- },
140
- ),
141
- datasets.SplitGenerator(
142
- name=datasets.Split.TEST,
143
- # These kwargs will be passed to _generate_examples
144
- gen_kwargs={
145
-
146
- "split": "test"
147
- },
148
- ),
149
  ]
150
 
151
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
152
  def _generate_examples(self, split):
153
- for i,d in enumerate(self.ds[split]):
154
  if self.config.name == "default":
155
- yield i, {
156
- 'input': d["input"],
157
- 'label': d["label"]
158
- }
159
-
160
  elif self.config.name == "unnormalized":
161
- yield i, {
162
- 'input': self.denormalize_s2(np.array(d["input"])),
163
- 'label': d["label"]
164
- }
165
-
166
-
 
 
 
 
 
167
 
 
14
  # TODO: Address all TODOs and remove all explanatory comments
15
  """TODO: Add a description here."""
16
 
 
17
  import csv
18
  import json
19
  import os
20
  import numpy as np
21
  import datasets
22
+ from datasets import Value
 
23
  # TODO: Add BibTeX citation
24
  # Find for instance the citation on arxiv or on the dataset repo/website
25
  _CITATION = """\
 
43
  # TODO: Add the licence for the dataset here if you can find it
44
  _LICENSE = ""
45
 
46
+ norm_values = {
47
+ 'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787, 'p99': 0.1946},
48
+ 'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925, 'p99': 0.2216},
49
+ 'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035, 'p99': 0.2556},
50
+ 'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023, 'p99': 0.2816},
51
+ 'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178, 'p99': 0.319},
52
+ 'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1633, 'p99': 0.519},
53
+ 'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1776, 'p99': 0.6076},
54
+ 'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691, 'p99': 0.646},
55
+ 'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.1871, 'p99': 0.6386},
56
+ 'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2124, 'p99': 0.6241},
57
+ 'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334, 'p99': 0.4827},
58
+ 'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.115, 'p99': 0.3872}}
59
+
60
+ feature_dtype = {'s2_num_days': Value('int16'),
61
+ 'gedi_num_days': Value('uint16'),
62
+ 'lat': Value('float32'),
63
+ 'lon': Value('float32'),
64
+ "agbd_se": Value('float32'),
65
+ "elev_lowes": Value('float32'),
66
+ "leaf_off_f": Value('uint8'),
67
+ "pft_class": Value('uint8'),
68
+ "region_cla": Value('uint8'),
69
+ "rh98": Value('float32'),
70
+ "sensitivity": Value('float32'),
71
+ "solar_elev": Value('float32'),
72
+ "urban_prop":Value('uint8')}
73
 
 
74
  class NewDataset(datasets.GeneratorBasedBuilder):
75
+ def __init__(self, *args, additional_features=[], normalize_data=True, patch_size=15, **kwargs):
76
+ self.inner_dataset_kwargs = kwargs
77
+ self._is_streaming = False
78
+ self.patch_size = patch_size
79
+ self.normalize_data = normalize_data
80
+ self.additional_features = additional_features
81
+ super().__init__(*args, **kwargs)
82
 
83
  VERSION = datasets.Version("1.1.0")
84
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  BUILDER_CONFIGS = [
86
+ datasets.BuilderConfig(name="default", version=VERSION, description="Normalized data"),
87
+ datasets.BuilderConfig(name="unnormalized", version=VERSION, description="Unnormalized data"),
88
  ]
89
 
90
+ DEFAULT_CONFIG_NAME = "default"
91
+
92
+ def as_streaming_dataset(self, split=None, base_path=None):
93
+ self._is_streaming = True
94
+ return super().as_streaming_dataset(split=split, base_path=base_path)
95
 
96
  def _info(self):
97
+ all_features = {
 
98
  'input': datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value('float32')))),
99
+ 'label': Value('float32')
100
+ }
101
+ for feat in self.additional_features:
102
+ all_features[feat] = feature_dtype[feat]
103
+ features = datasets.Features(all_features)
104
 
105
  return datasets.DatasetInfo(
 
106
  description=_DESCRIPTION,
107
+ features=features,
 
 
 
 
 
108
  homepage=_HOMEPAGE,
 
109
  license=_LICENSE,
 
110
  citation=_CITATION,
111
  )
112
 
113
+ def denormalize_s2(self, patch):
 
114
  res = []
115
+ for band, band_value in zip(['B04', 'B03', 'B02'], [patch[3], patch[2], patch[1]]):
116
+ p1, p99 = norm_values[band]['p1'], norm_values[band]['p99']
117
+ band_value = (p99 - p1) * band_value + p1
118
+ res.append(band_value)
119
+ patch[3], patch[2], patch[1] = res
120
  return patch
 
 
 
 
 
 
 
 
121
 
122
+ def _split_generators(self, dl_manager):
123
+ self.original_dataset = datasets.load_dataset("prs-eth/AGBD_raw", streaming=self._is_streaming)
124
  return [
125
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train"}),
126
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split": "val"}),
127
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test"}),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  ]
129
 
 
130
  def _generate_examples(self, split):
131
+ for i, d in enumerate(self.original_dataset[split]):
132
  if self.config.name == "default":
133
+ data = {'input': np.asarray(d["input"]), 'label': d["label"]}
 
 
 
 
134
  elif self.config.name == "unnormalized":
135
+ data = {'input': np.asarray(self.denormalize_s2(np.array(d["input"]))), 'label': d["label"]}
136
+
137
+ start_x = (data["input"].shape[1] - self.patch_size) // 2
138
+ start_y = (data["input"].shape[2] - self.patch_size) // 2
139
+ data["input"] = data["input"][:, start_x:start_x + self.patch_size, start_y:start_y + self.patch_size]
140
+
141
+ for feat in self.additional_features:
142
+ data[feat] = d["metadata"][feat]
143
+
144
+ yield i, data
145
+
146
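In short, this commit moves the inner load_dataset call out of _info and into _split_generators (streaming only when as_streaming_dataset was requested), expands the one-line norm_values dict into a readable multi-line literal, adds a feature_dtype table of optional metadata columns, gives the builder an __init__ with additional_features, normalize_data, and patch_size options, and replaces the per-config yield blocks with a shared path that center-crops each patch and attaches the requested metadata. A minimal usage sketch follows; the Hub ID "prs-eth/AGBD" is taken from the code removed above, while the keyword forwarding through load_dataset and the trust_remote_code flag are assumptions about the datasets library version, not part of this commit:

# Hypothetical usage of the updated builder. additional_features and patch_size
# reach NewDataset.__init__ because load_dataset forwards extra keyword
# arguments to the builder; "prs-eth/AGBD" is an assumed Hub ID.
import datasets

ds = datasets.load_dataset(
    "prs-eth/AGBD",                              # assumed dataset repo hosting BioMap.py
    "default",                                   # "default" = normalized, "unnormalized" = raw reflectances
    additional_features=["lat", "lon", "rh98"],  # any keys of feature_dtype above
    patch_size=15,                               # side length of the center crop
    streaming=True,                              # exercises the as_streaming_dataset override
    trust_remote_code=True,                      # needed for script-based datasets on recent versions
)
sample = next(iter(ds["train"]))
print(sample["label"], sample["lat"], sample["lon"], sample["rh98"])

Note that normalize_data is stored in __init__ but never referenced elsewhere in this version of the script; the normalized vs. unnormalized behavior is still selected solely by the config name.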
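For reference, denormalize_s2 inverts the 1st/99th-percentile scaling x_norm = (x - p1) / (p99 - p1) that was applied to the RGB bands B04, B03, and B02, i.e. it computes x = (p99 - p1) * x_norm + p1. A self-contained sketch of that mapping, using the B04 statistics from norm_values (denormalize_band is an illustrative name, not from the commit):

import numpy as np

B04 = {'p1': 0.1023, 'p99': 0.2816}  # percentile stats copied from norm_values

def denormalize_band(x_norm: np.ndarray, p1: float, p99: float) -> np.ndarray:
    """Invert x_norm = (x - p1) / (p99 - p1), mapping [0, 1] back to [p1, p99]."""
    return (p99 - p1) * x_norm + p1

x = np.array([0.0, 0.5, 1.0], dtype=np.float32)
print(denormalize_band(x, B04['p1'], B04['p99']))  # [0.1023  0.19195 0.2816 ]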
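The cropping added to _generate_examples keeps the central patch_size x patch_size window of each (bands, height, width) patch. The same operation as a standalone helper (center_crop is an illustrative name; the 25 x 25 input size is an assumption chosen for the example):

import numpy as np

def center_crop(patch: np.ndarray, patch_size: int) -> np.ndarray:
    """Keep the central patch_size x patch_size window of a (C, H, W) array."""
    start_x = (patch.shape[1] - patch_size) // 2
    start_y = (patch.shape[2] - patch_size) // 2
    return patch[:, start_x:start_x + patch_size, start_y:start_y + patch_size]

patch = np.zeros((12, 25, 25), dtype=np.float32)  # e.g. 12 Sentinel-2 bands
print(center_crop(patch, 15).shape)  # (12, 15, 15)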