Conrad747 committed on
Commit
9c81d80
1 Parent(s): fd7f535

Update lg-ner.py

Browse files
Files changed (1) hide show
  1. lg-ner.py +44 -62
lg-ner.py CHANGED
@@ -14,7 +14,7 @@
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
- """MasakhaNER: Named Entity Recognition for African Languages"""
18
 
19
  import datasets
20
 
@@ -23,52 +23,29 @@ logger = datasets.logging.get_logger(__name__)
23
 
24
 
25
  _CITATION = """\
26
- @article{Adelani2021MasakhaNERNE,
27
- title={MasakhaNER: Named Entity Recognition for African Languages},
28
- author={D. Adelani and Jade Abbott and Graham Neubig and Daniel D'Souza and Julia Kreutzer and Constantine Lignos
29
- and Chester Palen-Michel and Happy Buzaaba and Shruti Rijhwani and Sebastian Ruder and Stephen Mayhew and
30
- Israel Abebe Azime and S. Muhammad and Chris C. Emezue and Joyce Nakatumba-Nabende and Perez Ogayo and
31
- Anuoluwapo Aremu and Catherine Gitau and Derguene Mbaye and J. Alabi and Seid Muhie Yimam and Tajuddeen R. Gwadabe and
32
- Ignatius Ezeani and Rubungo Andre Niyongabo and Jonathan Mukiibi and V. Otiende and Iroro Orife and Davis David and
33
- Samba Ngom and Tosin P. Adewumi and Paul Rayson and Mofetoluwa Adeyemi and Gerald Muriuki and Emmanuel Anebi and
34
- C. Chukwuneke and N. Odu and Eric Peter Wairagala and S. Oyerinde and Clemencia Siro and Tobius Saul Bateesa and
35
- Temilola Oloyede and Yvonne Wambui and Victor Akinode and Deborah Nabagereka and Maurice Katusiime and
36
- Ayodele Awokoya and Mouhamadane Mboup and D. Gebreyohannes and Henok Tilaye and Kelechi Nwaike and Degaga Wolde and
37
- Abdoulaye Faye and Blessing Sibanda and Orevaoghene Ahia and Bonaventure F. P. Dossou and Kelechi Ogueji and
38
- Thierno Ibrahima Diop and A. Diallo and Adewale Akinfaderin and T. Marengereke and Salomey Osei},
39
- journal={ArXiv},
40
- year={2021},
41
- volume={abs/2103.11811}
42
  }
43
  """
44
 
45
  _DESCRIPTION = """\
46
- MasakhaNER is the first large publicly available high-quality dataset for named entity recognition (NER) in ten African languages.
47
- Named entities are phrases that contain the names of persons, organizations, locations, times and quantities.
48
- Example:
49
- [PER Wolff] , currently a journalist in [LOC Argentina] , played with [PER Del Bosque] in the final years of the seventies in [ORG Real Madrid] .
50
- MasakhaNER is a named entity dataset consisting of PER, ORG, LOC, and DATE entities annotated by Masakhane for ten African languages:
51
- - Amharic
52
- - Hausa
53
- - Igbo
54
- - Kinyarwanda
55
- - Luganda
56
- - Luo
57
- - Nigerian-Pidgin
58
- - Swahili
59
- - Wolof
60
- - Yoruba
61
- The train/validation/test sets are available for all the ten languages.
62
- For more details see https://arxiv.org/abs/2103.11811
63
  """
64
 
65
- _URL = "https://github.com/masakhane-io/masakhane-ner/raw/main/data/"
 
 
 
66
  _TRAINING_FILE = "train.txt"
67
- _DEV_FILE = "dev.txt"
68
  _TEST_FILE = "test.txt"
69
 
70
 
71
- class MasakhanerConfig(datasets.BuilderConfig):
72
  """BuilderConfig for Masakhaner"""
73
 
74
  def __init__(self, **kwargs):
@@ -76,25 +53,14 @@ class MasakhanerConfig(datasets.BuilderConfig):
76
  Args:
77
  **kwargs: keyword arguments forwarded to super.
78
  """
79
- super(MasakhanerConfig, self).__init__(**kwargs)
80
 
81
 
82
  class Masakhaner(datasets.GeneratorBasedBuilder):
83
  """Masakhaner dataset."""
84
 
85
  BUILDER_CONFIGS = [
86
- MasakhanerConfig(name="amh", version=datasets.Version("1.0.0"), description="Masakhaner Amharic dataset"),
87
- MasakhanerConfig(name="hau", version=datasets.Version("1.0.0"), description="Masakhaner Hausa dataset"),
88
- MasakhanerConfig(name="ibo", version=datasets.Version("1.0.0"), description="Masakhaner Igbo dataset"),
89
- MasakhanerConfig(name="kin", version=datasets.Version("1.0.0"), description="Masakhaner Kinyarwanda dataset"),
90
- MasakhanerConfig(name="lug", version=datasets.Version("1.0.0"), description="Masakhaner Luganda dataset"),
91
- MasakhanerConfig(name="luo", version=datasets.Version("1.0.0"), description="Masakhaner Luo dataset"),
92
- MasakhanerConfig(
93
- name="pcm", version=datasets.Version("1.0.0"), description="Masakhaner Nigerian-Pidgin dataset"
94
- ),
95
- MasakhanerConfig(name="swa", version=datasets.Version("1.0.0"), description="Masakhaner Swahili dataset"),
96
- MasakhanerConfig(name="wol", version=datasets.Version("1.0.0"), description="Masakhaner Wolof dataset"),
97
- MasakhanerConfig(name="yor", version=datasets.Version("1.0.0"), description="Masakhaner Yoruba dataset"),
98
  ]
99
 
100
  def _info(self):
@@ -108,36 +74,52 @@ class Masakhaner(datasets.GeneratorBasedBuilder):
108
  datasets.features.ClassLabel(
109
  names=[
110
  "O",
111
- "B-PER",
112
- "I-PER",
113
- "B-ORG",
114
- "I-ORG",
115
- "B-LOC",
116
- "I-LOC",
 
 
117
  "B-DATE",
118
  "I-DATE",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  ]
120
  )
121
  ),
122
  }
123
  ),
124
  supervised_keys=None,
125
- homepage="https://arxiv.org/abs/2103.11811",
126
  citation=_CITATION,
127
  )
128
 
129
  def _split_generators(self, dl_manager):
130
  """Returns SplitGenerators."""
131
  urls_to_download = {
132
- "train": f"{_URL}{self.config.name}/{_TRAINING_FILE}",
133
- "dev": f"{_URL}{self.config.name}/{_DEV_FILE}",
134
- "test": f"{_URL}{self.config.name}/{_TEST_FILE}",
135
  }
136
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
137
 
138
  return [
139
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
140
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
141
  datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
142
  ]
143
 
@@ -159,7 +141,7 @@ class Masakhaner(datasets.GeneratorBasedBuilder):
159
  tokens = []
160
  ner_tags = []
161
  else:
162
- # Masakhaner tokens are space separated
163
  splits = line.split(" ")
164
  tokens.append(splits[0])
165
  ner_tags.append(splits[1].rstrip())
 
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
+ """LugandaPII: PII for Luganda Language"""
18
 
19
  import datasets
20
 
 
23
 
24
 
25
  _CITATION = """\
26
+ @InProceedings{huggingface:dataset,
27
+ title = {Luganda Ner Dataset},
28
+ author={many authors
29
+ },
30
+ year={2022}
 
 
 
 
 
 
 
 
 
 
 
31
  }
32
  """
33
 
34
  _DESCRIPTION = """\
35
+ LugandaPII is a named entity dataset consisting of PERSON, ORG, LOCATION, NORP, USERID and DATE entities.
36
+ The train/validation/test sets are available for the Luganda language.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  """
38
 
39
+ # for github, replace "tree" with "raw" for example;
40
+ # "https://github.com/conradsuuna/luganda-ner-data/tree/main/data" =>
41
+ # "https://github.com/conradsuuna/luganda-ner-data/raw/main/data/"
42
+ _URL = "https://github.com/conradsuuna/luganda-ner-data/raw/main/data"
43
  _TRAINING_FILE = "train.txt"
44
+ _VAL_FILE = "val.txt"
45
  _TEST_FILE = "test.txt"
46
 
47
 
48
+ class LugPIIConfig(datasets.BuilderConfig):
49
  """BuilderConfig for Masakhaner"""
50
 
51
  def __init__(self, **kwargs):
 
53
  Args:
54
  **kwargs: keyword arguments forwarded to super.
55
  """
56
+ super(LugPIIConfig, self).__init__(**kwargs)
57
 
58
 
59
  class Masakhaner(datasets.GeneratorBasedBuilder):
60
  """Masakhaner dataset."""
61
 
62
  BUILDER_CONFIGS = [
63
+ LugPIIConfig(name="lug", version=datasets.Version("1.0.0"), description="PII NER Luganda dataset"),
 
 
 
 
 
 
 
 
 
 
 
64
  ]
65
 
66
  def _info(self):
 
74
  datasets.features.ClassLabel(
75
  names=[
76
  "O",
77
+ "B-PERSON",
78
+ "I-PERSON",
79
+ "L-PERSON",
80
+ "U-PERSON",
81
+ "B-NORP",
82
+ "I-NORP",
83
+ "L-NORP",
84
+ "U-NORP",
85
  "B-DATE",
86
  "I-DATE",
87
+ "L-DATE",
88
+ "U-DATE",
89
+ "B-USERID",
90
+ "I-USERID",
91
+ "L-USERID",
92
+ "U-USERID",
93
+ "B-ORG",
94
+ "I-ORG",
95
+ "L-ORG",
96
+ "U-ORG",
97
+ "B-LOCATION",
98
+ "I-LOCATION",
99
+ "L-LOCATION",
100
+ "U-LOCATION",
101
  ]
102
  )
103
  ),
104
  }
105
  ),
106
  supervised_keys=None,
107
+ homepage="",
108
  citation=_CITATION,
109
  )
110
 
111
  def _split_generators(self, dl_manager):
112
  """Returns SplitGenerators."""
113
  urls_to_download = {
114
+ "train": f"{_URL}/{_TRAINING_FILE}",
115
+ "val": f"{_URL}/{_VAL_FILE}",
116
+ "test": f"{_URL}/{_TEST_FILE}",
117
  }
118
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
119
 
120
  return [
121
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
122
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
123
  datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
124
  ]
125
 
 
141
  tokens = []
142
  ner_tags = []
143
  else:
144
+ # since our tokens are space separated
145
  splits = line.split(" ")
146
  tokens.append(splits[0])
147
  ner_tags.append(splits[1].rstrip())