Updating processor.py
* `--sanitize` argument now works as a flag (see the sketch below)
* If a JSON file with annotations cannot be parsed, it is discarded.
* Data sanitization is announced by the CLI.
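For reference, the flag behavior comes from argparse's `store_true` action: the option takes no value, and parses to `True` when present and `False` when omitted. A minimal sketch of the new semantics (the argument name is taken from this commit, the rest is illustrative):

```python
import argparse

parser = argparse.ArgumentParser()
# Same pattern as the new processor.py argument: a value-less flag.
parser.add_argument("--sanitize", action='store_true',
                    help="Run basic data cleanup procedure.")

print(parser.parse_args(["--sanitize"]).sanitize)  # True
print(parser.parse_args([]).sanitize)              # False
```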
processor.py CHANGED (+14 -9)
```diff
@@ -95,7 +95,7 @@ def fix_snippets(logs: dict):
     return logs
 
 
-def cleanup(dataset: pd.DataFrame):
+def cleanup(dataset: pd.DataFrame, min_how_to_fix_len=10, min_fail_reason_len=10, min_total_snippet_annot_len=10):
     """Cleaning the dataset
 
     For sake of simplicity we are going to assume following.
@@ -105,6 +105,8 @@ def cleanup(dataset: pd.DataFrame):
     3. Submissions with total length of snippet annotations shorter than `10` chars are not useful.
     4. Submissions with snippets set to `None` are not useful."""
 
+    print("Performing dataset cleanup")
+
     # Fixing snippets with messed up text
     filled_snippets = dataset.logs.apply(fix_snippets)
     dataset['filled_snippets'] = filled_snippets
@@ -116,9 +118,9 @@ def cleanup(dataset: pd.DataFrame):
     dataset["tot_snippet_annot_len"] = dataset['logs'].apply(total_snippet_annotation_len)
 
     # Conditions
-    almost_empty_fix = dataset['how_to_fix_len'] < 10
-    almost_empty_reason = dataset['fail_reason_len'] < 10
-    almost_empty_snippet_annotations = dataset["tot_snippet_annot_len"] < 10
+    almost_empty_fix = dataset['how_to_fix_len'] < min_how_to_fix_len
+    almost_empty_reason = dataset['fail_reason_len'] < min_fail_reason_len
+    almost_empty_snippet_annotations = dataset["tot_snippet_annot_len"] < min_total_snippet_annot_len
     none_snippets = dataset['logs'].apply(none_snippet)
 
     sparse_annotation_criteria = (
@@ -150,10 +152,13 @@ def load_data(data_path: str):
     for root, _, files in os.walk(data_path):
         for file in files:
             if file.endswith(".json"):
-                with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
-                    parsed_data = json.load(f)
-                    parsed_data["file_name"] = file
-                    data.append(parsed_data)
+                try:
+                    with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
+                        parsed_data = json.load(f)
+                        parsed_data["file_name"] = file
+                        data.append(parsed_data)
+                except json.decoder.JSONDecodeError:
+                    print(f"Exception encountered while parsing {os.path.join(root, file)}, skipping.")
 
     return data
 
@@ -262,7 +267,7 @@ if __name__ == "__main__":
     parser.add_argument(
         "data_path", type=str, default="results/results", help="Path to annotations.")
     parser.add_argument(
-        "--sanitize",
+        "--sanitize", action='store_true', help="Run basic data cleanup procedure, dataset build may fail without it.")
     parser.add_argument(
         "--seed", type=int, default=42,
         help="Seed for random generator to be used when generating split.")
```
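One note on the exception handling: `json.decoder.JSONDecodeError` is the same class that the `json` package re-exports as `json.JSONDecodeError`, so either spelling catches a malformed file. A quick standalone check of the skip path (the input string and file name are illustrative):

```python
import json

try:
    json.loads("{ not valid json")
except json.JSONDecodeError as err:  # identical to json.decoder.JSONDecodeError
    print(f"Exception encountered while parsing bad.json, skipping. ({err})")
```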
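With the thresholds lifted into keyword arguments, the filtering can be tuned per run without editing the function body. A hedged usage sketch, assuming `processor.py` is importable and that the loaded records form the frame `cleanup` expects:

```python
import pandas as pd

from processor import cleanup, load_data

# Build the frame from the parsed annotation files (column layout assumed).
dataset = pd.DataFrame(load_data("results/results"))

# Defaults reproduce the documented behavior: drop entries under 10 chars.
cleanup(dataset)

# Stricter run: require longer free-text and snippet annotations.
cleanup(dataset, min_how_to_fix_len=25, min_fail_reason_len=25,
        min_total_snippet_annot_len=50)
```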