chintagunta85 committed on
Commit
dc9ed5e
1 Parent(s): acaba8f

Upload species_800.py

Browse files
Files changed (1) hide show
  1. species_800.py +150 -0
species_800.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """The SPECIES and ORGANISMS Resources for Fast and Accurate Identification of Taxonomic Names in Text"""
18
+
19
+ import os
20
+
21
+ import datasets
22
+
23
+
24
# Module-level logger, obtained via the `datasets` logging helper so that the
# library's global verbosity settings apply to this script's messages.
logger = datasets.logging.get_logger(__name__)
25
+
26
+
27
+ _CITATION = """\
28
+ @article{pafilis2013species,
29
+ title={The SPECIES and ORGANISMS resources for fast and accurate identification of taxonomic names in text},
30
+ author={Pafilis, Evangelos and Frankild, Sune P and Fanini, Lucia and Faulwetter, Sarah and Pavloudi, Christina and Vasileiadou, Aikaterini and Arvanitidis, Christos and Jensen, Lars Juhl},
31
+ journal={PloS one},
32
+ volume={8},
33
+ number={6},
34
+ pages={e65390},
35
+ year={2013},
36
+ publisher={Public Library of Science}
37
+ }
38
+ """
39
+
40
+ _DESCRIPTION = """\
41
+ We have developed an efficient algorithm and implementation of a dictionary-based approach to named entity recognition,
42
+ which we here use to identifynames of species and other taxa in text. The tool, SPECIES, is more than an order of
43
+ magnitude faster and as accurate as existing tools. The precision and recall was assessed both on an existing gold-standard
44
+ corpus and on a new corpus of 800 abstracts, which were manually annotated after the development of the tool. The corpus
45
+ comprises abstracts from journals selected to represent many taxonomic groups, which gives insights into which types of
46
+ organism names are hard to detect and which are easy. Finally, we have tagged organism names in the entire Medline database
47
+ and developed a web resource, ORGANISMS, that makes the results accessible to the broad community of biologists.
48
+ """
49
+
50
+ _HOMEPAGE = "https://species.jensenlab.org/"
51
+ _URL = "https://drive.google.com/u/0/uc?id=1OletxmPYNkz2ltOr9pyT0b0iBtUWxslh&export=download/"
52
+ _BIOBERT_NER_DATASET_DIRECTORY = "s800"
53
+ _TRAINING_FILE = "train.tsv"
54
+ _DEV_FILE = "devel.tsv"
55
+ _TEST_FILE = "test.tsv"
56
+
57
+
58
class Species800Config(datasets.BuilderConfig):
    """Configuration object for the Species-800 dataset builder."""

    def __init__(self, **kwargs):
        """Create a Species800 configuration.

        Args:
            **kwargs: keyword arguments forwarded unchanged to
                ``datasets.BuilderConfig`` (name, version, description, ...).
        """
        super().__init__(**kwargs)
67
+
68
+
69
class Species800(datasets.GeneratorBasedBuilder):
    """Species-800 (S800) named-entity-recognition dataset builder.

    The corpus ships as CoNLL-style TSV files: one token per line, token and
    IOB tag separated by a tab, sentences separated by blank lines.
    """

    BUILDER_CONFIGS = [
        Species800Config(name="species_800", version=datasets.Version("1.0.0"), description="Species800 dataset"),
    ]

    def _info(self):
        """Return the DatasetInfo (features, description, citation, homepage)."""
        # Label vocabulary shared with the other BioBERT-style NER corpora so
        # several datasets can be concatenated under one ClassLabel space;
        # Species-800 itself only emits O / B-SPECIES / I-SPECIES.
        custom_names = [
            "O", "B-GENE", "I-GENE", "B-CHEMICAL", "I-CHEMICAL",
            "B-DISEASE", "I-DISEASE", "B-DNA", "I-DNA", "B-RNA", "I-RNA",
            "B-CELL_LINE", "I-CELL_LINE", "B-CELL_TYPE", "I-CELL_TYPE",
            "B-PROTEIN", "I-PROTEIN", "B-SPECIES", "I-SPECIES",
        ]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=custom_names)),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return one SplitGenerator per split."""
        urls_to_download = {
            "biobert_ner_datasets": _URL,
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        dataset_directory = os.path.join(downloaded_files["biobert_ner_datasets"], _BIOBERT_NER_DATASET_DIRECTORY)

        return [
            datasets.SplitGenerator(
                name=split_name, gen_kwargs={"filepath": os.path.join(dataset_directory, filename)}
            )
            for split_name, filename in (
                (datasets.Split.TRAIN, _TRAINING_FILE),
                (datasets.Split.VALIDATION, _DEV_FILE),
                (datasets.Split.TEST, _TEST_FILE),
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one CoNLL-style TSV file.

        Bare "B"/"I" tags from the corpus are normalized to
        "B-SPECIES"/"I-SPECIES"; anything else (typically "O") passes through.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # Blank line ends the current sentence.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Token and tag are tab separated.
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    tag = splits[1].rstrip()
                    # Map the corpus' bare B/I tags onto the shared label space.
                    if tag == "B":
                        ner_tags.append("B-SPECIES")
                    elif tag == "I":
                        ner_tags.append("I-SPECIES")
                    else:
                        ner_tags.append(tag)
            # Last sentence: only emit if the file did NOT end with a blank
            # line (bug fix — the original unconditionally yielded here, which
            # produced a trailing empty example for newline-terminated files).
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }