reyoung committed on
Commit
b59bbe1
·
1 Parent(s): 817c3cd

Polish multiprocessing code

Files changed (1)
  1. wikipedia.py +30 -27
wikipedia.py CHANGED
@@ -16,7 +16,6 @@
 # Lint as: python3
 """Wikipedia dataset containing cleaned articles of all languages."""
 
-
 import bz2
 import codecs
 import json
@@ -24,15 +23,13 @@ import re
 import xml.etree.cElementTree as etree
 from urllib.parse import quote
 import mwparserfromhell
-from multiprocess import Process, Manager
+import multiprocessing
 from tqdm import tqdm
 
 import datasets
 
-
 logger = datasets.logging.get_logger(__name__)
 
-
 _CITATION = """\
 @ONLINE {wikidump,
     author = {Wikimedia Foundation},
@@ -878,7 +875,6 @@ CAT_ALIASES = {
 _BASE_URL_TMPL = "https://dumps.wikimedia.org/{lang}wiki/{date}/"
 _INFO_FILE = "dumpstatus.json"
 
-
 _VERSION = datasets.Version("2.0.0", "")
 
 
@@ -907,6 +903,9 @@ class WikipediaConfig(datasets.BuilderConfig):
 _DATE = "20220301"
 
 
+class ProcessDone: pass
+
+
 class Wikipedia(datasets.GeneratorBasedBuilder):
     """Wikipedia dataset."""
 
@@ -953,7 +952,7 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
             dump_info = json.load(f)
         multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
         assert (
-            multistream_dump_info["status"] == "done"
+            multistream_dump_info["status"] == "done"
         ), "Specified dump (%s) multistream status is not 'done': %s" % (
             _base_url(lang),
             multistream_dump_info["status"],
@@ -979,7 +978,6 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, filepaths, language, no_labels=False):
 
-
        def _extract_content(filepath):
            """Extracts article content from a single WikiMedia XML file."""
            logger.info("generating examples from = %s", filepath)
@@ -1029,26 +1027,31 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
             return id_, {"id": id_, "url": url, "title": title, "text": text}
 
         print("Parsing and cleaning Wikipedia examples")
-        with Manager() as manager:
-            examples = manager.list()
-            processes = []
-            for filepath in filepaths:
-                def parse_and_clean(examples):
-                    content = _extract_content(filepath)
-                    for obj in tqdm(content):
-                        examples.append(_clean_content(obj, language=language))
-                p = Process(target=parse_and_clean, args=(examples,))
-                p.start()
-                processes.append(p)
-
-            for p in processes:
-                p.join()
-
-            print("Parsed and cleaned Wikipedia examples")
-
-            for example in examples:
-                if example is not None:
-                    yield example
+
+        examples = multiprocessing.Queue()
+
+        def parse_and_clean(filepath):
+            content = _extract_content(filepath)
+            for obj in tqdm(content):
+                examples.put(_clean_content(obj, language=language))
+            examples.put(ProcessDone())
+
+        with multiprocessing.Pool() as pool:
+            result = pool.map_async(parse_and_clean, filepaths)
+            n = len(filepaths)
+            complete = 0
+            while complete != n:
+                item = examples.get()
+                if isinstance(item, ProcessDone):
+                    complete += 1
+                    continue
+
+                if item is None:
+                    continue
+
+                yield item
+
+            result.wait()
 
 
 def _parse_and_clean_wikicode(raw_content, parser, language):
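
The new code in _generate_examples is a sentinel-terminated producer/consumer loop: each worker pushes cleaned examples onto a shared queue, follows them with a ProcessDone marker, and the parent keeps draining the queue until it has seen one marker per input file. Below is a minimal, self-contained sketch of that pattern with the standard multiprocessing module; the names worker, Done, and consume are illustrative and not part of wikipedia.py, and the sketch uses multiprocessing.Process rather than Pool because a Pool worker must be a picklable, module-level callable and a multiprocessing.Queue cannot be passed to it as a task argument.

# Sketch only: the sentinel-terminated queue pattern, not the wikipedia.py code itself.
# `worker`, `Done`, and `consume` are hypothetical names used for illustration.
import multiprocessing


class Done:
    """Sentinel a worker pushes when its file is exhausted (analogous to ProcessDone)."""


def worker(filepath, queue):
    # Stand-in for _extract_content / _clean_content in wikipedia.py.
    for i in range(3):
        queue.put(f"{filepath}:example-{i}")
    queue.put(Done())


def consume(filepaths):
    queue = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker, args=(fp, queue)) for fp in filepaths]
    for p in procs:
        p.start()
    finished = 0
    while finished < len(procs):
        item = queue.get()
        if isinstance(item, Done):
            finished += 1
            continue
        if item is None:  # skip articles the cleaner dropped
            continue
        yield item
    for p in procs:
        p.join()


if __name__ == "__main__":
    for example in consume(["a.xml", "b.xml"]):
        print(example)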