Formatting processor.py with ruff (#4)
Commit dfa6c803fd9fb38dba5e58ed6c82f29ebae16a24

processor.py CHANGED: +78 -43
@@ -54,6 +54,7 @@ FULL_CONVERSATION_MSG = """Given following log snippets, their explanation, and
 
 ANALYSIS_MSG = "Issue: {issue}\nResolution: {resolution}"
 
+
 def n_snippets(logs: dict):
     """Count number of snippets in a log."""
     cnt = 0
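The only change in this hunk is the second blank line ruff requires before a top-level def. For context, n_snippets tallies snippets across every log file of an annotation; the rest of its body is not shown in this diff, so the loop below is an assumption based on how the logs dicts are used elsewhere in the file:

def n_snippets(logs: dict):
    """Count number of snippets in a log."""
    cnt = 0
    # Assumed continuation; only the three lines above appear in the hunk.
    for log in logs:
        if logs[log] and "snippets" in logs[log]:
            cnt += len(logs[log]["snippets"])
    return cnt

# Toy input: one annotated file with two snippets, one empty entry.
assert n_snippets({"build.log": {"snippets": [{}, {}]}, "root.log": None}) == 2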
@@ -89,13 +90,20 @@ def fix_snippets(logs: dict):
     for log in logs:
         if logs[log]:
             for snippet in logs[log]["snippets"]:
-                snippet_text = logs[log]["content"][snippet["start_index"]:snippet["end_index"]]
+                snippet_text = logs[log]["content"][
+                    snippet["start_index"] : snippet["end_index"]
+                ]
                 if "text" not in snippet or not snippet["text"]:
                     snippet["text"] = snippet_text
     return logs
 
 
-def cleanup(dataset: pd.DataFrame, min_how_to_fix_len=10, min_fail_reason_len=10, min_total_snippet_annot_len=10):
+def cleanup(
+    dataset: pd.DataFrame,
+    min_how_to_fix_len=10,
+    min_fail_reason_len=10,
+    min_total_snippet_annot_len=10,
+):
     """Cleaning the dataset
 
     For sake of simplicity we are going to assume following.
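Splitting the slice across lines (with ruff's spaces around the slice colon) makes the recovery logic in fix_snippets plain: a snippet that arrives without its text is re-cut from the raw log content using the stored indices. A runnable toy illustration; the field names match the hunk, the data is invented:

logs = {
    "build.log": {
        "content": "collect2: error: ld returned 1 exit status",
        "snippets": [{"start_index": 10, "end_index": 42, "text": ""}],
    }
}

for log in logs:
    if logs[log]:
        for snippet in logs[log]["snippets"]:
            snippet_text = logs[log]["content"][
                snippet["start_index"] : snippet["end_index"]
            ]
            if "text" not in snippet or not snippet["text"]:
                snippet["text"] = snippet_text

print(logs["build.log"]["snippets"][0]["text"])
# error: ld returned 1 exit status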
@@ -109,30 +117,36 @@ def cleanup(dataset: pd.DataFrame, min_how_to_fix_len=10, min_fail_reason_len=10
 
     # Fixing snippets with messed up text
     filled_snippets = dataset.logs.apply(fix_snippets)
+    dataset["filled_snippets"] = filled_snippets
     dataset.logs = dataset.filled_snippets
 
     # Setting conditional columns
+    dataset["how_to_fix_len"] = dataset.how_to_fix.apply(len)
+    dataset["fail_reason_len"] = dataset.fail_reason.apply(len)
+    dataset["tot_snippet_annot_len"] = dataset["logs"].apply(
+        total_snippet_annotation_len
+    )
 
     # Conditions
+    almost_empty_fix = dataset["how_to_fix_len"] < min_how_to_fix_len
+    almost_empty_reason = dataset["fail_reason_len"] < min_fail_reason_len
+    almost_empty_snippet_annotations = (
+        dataset["tot_snippet_annot_len"] < min_total_snippet_annot_len
+    )
+    none_snippets = dataset["logs"].apply(none_snippet)
 
     sparse_annotation_criteria = (
+        almost_empty_snippet_annotations
+        | almost_empty_reason
+        | almost_empty_fix
+        | none_snippets
+    )
 
     sparse_count = dataset[sparse_annotation_criteria].shape[0]
     print(
         f"Total sparse annotations: {sparse_count} \
+        Relative to original dataset: {sparse_count / dataset.shape[0]}"
+    )
 
     # Applying filters
     final_dataset = dataset[~sparse_annotation_criteria]
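The named intermediate columns spell out what cleanup actually filters: each almost_empty_* name is a boolean pandas Series, | ORs the masks together, and ~ keeps the rows that fail none of the sparseness tests. A self-contained sketch of the same pattern, with toy data and the default threshold of 10:

import pandas as pd

df = pd.DataFrame(
    {
        "how_to_fix": ["rebuild with the fixed BuildRequires", "idk"],
        "fail_reason": ["missing dependency at link time", "?"],
    }
)
df["how_to_fix_len"] = df.how_to_fix.apply(len)
df["fail_reason_len"] = df.fail_reason.apply(len)

almost_empty_fix = df["how_to_fix_len"] < 10
almost_empty_reason = df["fail_reason_len"] < 10
sparse_annotation_criteria = almost_empty_fix | almost_empty_reason

print(f"Total sparse annotations: {sparse_annotation_criteria.sum()}")  # 1
final = df[~sparse_annotation_criteria]  # the "idk"/"?" row is dropped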
@@ -153,12 +167,14 @@ def load_data(data_path: str):
         for file in files:
             if file.endswith(".json"):
                 try:
+                    with open(os.path.join(root, file), "r", encoding="utf-8") as f:
                         parsed_data = json.load(f)
                         parsed_data["file_name"] = file
                         data.append(parsed_data)
                 except json.decoder.JSONDecodeError:
-                    print(f"Exception encountered while parsing {os.path.join(root, file)}, skipping.")
+                    print(
+                        f"Exception encountered while parsing {os.path.join(root, file)}, skipping."
+                    )
 
     return data
 
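Two behavioural details here go beyond formatting: the open call now pins encoding="utf-8" instead of relying on the platform default, and a malformed JSON file is reported and skipped rather than aborting the whole load. The hunk only shows the inner loop; a sketch of the full function, under the assumption (suggested by the root/files names) that it wraps os.walk:

import json
import os

def load_data(data_path: str):
    data = []
    for root, _dirs, files in os.walk(data_path):  # assumed framing, not shown in the diff
        for file in files:
            if file.endswith(".json"):
                try:
                    with open(os.path.join(root, file), "r", encoding="utf-8") as f:
                        parsed_data = json.load(f)
                        parsed_data["file_name"] = file
                        data.append(parsed_data)
                except json.decoder.JSONDecodeError:
                    print(
                        f"Exception encountered while parsing {os.path.join(root, file)}, skipping."
                    )
    return data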
@@ -167,23 +183,21 @@ def make_snippet_qa(snippet: dict, user_message_template: str):
     """Create a QnA pair from a snippet"""
     return {
         "question": user_message_template.format(snippet["text"]),
-        "answer": snippet["user_comment"]
+        "answer": snippet["user_comment"],
     }
 
 
+def make_analysis_qa(
+    snippets, issue, resolution, user_message_template, analysis_template
+):
     """Create QnA pair from entire annotation."""
     formatted_snippets = "\n=============\n".join(
-        [
-            f'[{e["text"]}]: [{e["user_comment"]}]'
-            for e in snippets
-        ]
+        [f"[{e['text']}]: [{e['user_comment']}]" for e in snippets]
     )
 
     return {
         "question": user_message_template.format(formatted_snippets),
-        "answer": analysis_template.format(issue=issue, resolution=resolution)
+        "answer": analysis_template.format(issue=issue, resolution=resolution),
     }
 
 
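Both builders are pure functions, so their output is easy to pin down. A toy invocation using the two definitions exactly as they appear in the hunk above; the templates are stand-ins, not the real SNIPPET_USER_MSG and FULL_CONVERSATION_MSG:

ANALYSIS_MSG = "Issue: {issue}\nResolution: {resolution}"
snippet = {"text": "error: ld returned 1", "user_comment": "Linker failed."}

make_snippet_qa(snippet, "Explain this snippet:\n{}")
# {'question': 'Explain this snippet:\nerror: ld returned 1',
#  'answer': 'Linker failed.'}

make_analysis_qa(
    [snippet],
    issue="link step fails",
    resolution="add the missing library to BuildRequires",
    user_message_template="Given these snippets:\n{}",
    analysis_template=ANALYSIS_MSG,
)
# {'question': 'Given these snippets:\n[error: ld returned 1]: [Linker failed.]',
#  'answer': 'Issue: link step fails\nResolution: add the missing library to BuildRequires'}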
@@ -204,9 +218,7 @@ def to_qa_pairs(dataframe: pd.DataFrame):
         if logfiles[file] and "snippets" in logfiles[file]:
             for snippet in logfiles[file]["snippets"]:
                 sample_snippet_messages.append(
-                    make_snippet_qa(
-                        snippet,
-                        SNIPPET_USER_MSG)
+                    make_snippet_qa(snippet, SNIPPET_USER_MSG)
                 )
                 annotated_snippets.append(snippet)
         qa_pairs.append(
@@ -215,7 +227,9 @@ def to_qa_pairs(dataframe: pd.DataFrame):
                 data.fail_reason,
                 data.how_to_fix,
                 FULL_CONVERSATION_MSG,
-                ANALYSIS_MSG))
+                ANALYSIS_MSG,
+            )
+        )
         # Adding individual snippet messages
         qa_pairs.extend(sample_snippet_messages)
     return qa_pairs
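With the call properly closed, each annotated sample contributes one analysis-level pair followed by one pair per annotated snippet. For a sample with two snippets, qa_pairs grows by three entries, shaped roughly like this (placeholder contents, for illustration only):

qa_pairs = [
    {"question": "<FULL_CONVERSATION_MSG with all snippets>", "answer": "Issue: ...\nResolution: ..."},
    {"question": "<SNIPPET_USER_MSG with snippet 1>", "answer": "<annotator comment 1>"},
    {"question": "<SNIPPET_USER_MSG with snippet 2>", "answer": "<annotator comment 2>"},
]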
@@ -225,14 +239,18 @@ def split_dataset(dataset: pd.DataFrame, seed: int):
     """Splits dataset into training and evaluation subset"""
     split = dataset.shape[0] // 5
     dataset = dataset.sample(frac=1.0, random_state=seed)
+    train_dataset, test_dataset = dataset[split:], dataset[:split]
 
     return train_dataset, test_dataset
 
 
+def main(
+    hf_token,
+    data_path="results/results",
+    sanitize=True,
+    seed=42,
+    repo_id="fedora-copr/log_detective_qna",
+):
     """Process entire directory and turn contents into parquet files."""
 
     # For reproducibility
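split_dataset shuffles reproducibly and slices off the first fifth as the evaluation subset, i.e. an 80/20 split. Checking the arithmetic on a toy frame:

import pandas as pd

dataset = pd.DataFrame({"x": range(10)})
split = dataset.shape[0] // 5                          # 10 // 5 == 2
dataset = dataset.sample(frac=1.0, random_state=42)    # deterministic shuffle
train_dataset, test_dataset = dataset[split:], dataset[:split]

assert len(train_dataset) == 8 and len(test_dataset) == 2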
@@ -252,7 +270,8 @@ def main(hf_token, data_path="results/results", sanitize=True,
 
     dataset = {
         "train": datasets.Dataset.from_list(train_dataset),
-        "test": datasets.Dataset.from_list(test_dataset)}
+        "test": datasets.Dataset.from_list(test_dataset),
+    }
 
     dataset = datasets.DatasetDict(dataset)
 
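The now-closed dict feeds datasets.DatasetDict, the structure the Hugging Face datasets library uses for multi-split datasets. A minimal sketch with toy records; the push_to_hub call is an assumption about what main does next with repo_id and hf_token, it is not shown in this diff:

import datasets

dataset = datasets.DatasetDict(
    {
        "train": datasets.Dataset.from_list([{"question": "q1", "answer": "a1"}]),
        "test": datasets.Dataset.from_list([{"question": "q2", "answer": "a2"}]),
    }
)
# dataset.push_to_hub(repo_id, token=hf_token)  # assumed upload step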
@@ -265,18 +284,34 @@ def main(hf_token, data_path="results/results", sanitize=True,
 if __name__ == "__main__":
     parser = argparse.ArgumentParser("Dataset Processor")
     parser.add_argument(
+        "data_path", type=str, default="results/results", help="Path to annotations."
+    )
     parser.add_argument(
+        "--sanitize",
+        action="store_true",
+        help="Run basic data cleanup procedure, dataset build may fail without it.",
+    )
     parser.add_argument(
+        "--seed",
+        type=int,
+        default=42,
+        help="Seed for random generator to be used when generating split.",
+    )
     parser.add_argument(
+        "--repo_id",
+        type=str,
+        default="fedora-copr/log_detective_qna",
+        help="ID of Hugging Face repo",
+    )
+    parser.add_argument(
+        "--token", type=str, required=True, help="Token for Hugging face"
+    )
     args = parser.parse_args()
 
     main(
+        data_path=args.data_path,
+        sanitize=args.sanitize,
+        seed=args.seed,
+        repo_id=args.repo_id,
+        hf_token=args.token,
+    )
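With the argparse block reformatted one keyword per line, the CLI surface is easy to read off: a positional data_path plus --sanitize, --seed, --repo_id, and the required --token. An illustrative invocation (the token value is a placeholder):

python processor.py results/results --sanitize --seed 42 \
    --repo_id fedora-copr/log_detective_qna --token hf_xxx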