Datasets:
Tasks: Token Classification
Languages: English
Size: 10K<n<100K
Tags: Not-For-All-Audiences
License:
normalize: handle tags ending with '.' differently

normalize_tags.py (+11 -27)
@@ -2,12 +2,12 @@
 
 import argparse
 import logging
+import math
 import re
+import subprocess
 import time
 from collections import Counter
-from itertools import chain
 from pathlib import Path
-import subprocess
 
 try:
     from tqdm import tqdm
@@ -207,27 +207,6 @@ RE_SEP = re.compile(r"[,\n]")  # Split on commas and newlines
 RE_ESCAPES = re.compile(r"\\+?(?=[():])")  # Match backslash escapes before :()
 
 
-def load_caption(fp: Path):
-    """
-    Load caption from file and split out caption sentences.
-
-    Captions are formatted like this: tag1, tag2, sentence caption1., sentence
-    caption2. Optional sentence captions ending with "." are split out so that
-    they are left untouched.
-    """
-    tags, captions = [], []
-    with open(fp, "rt") as fd:
-        for chunk in RE_SEP.split(fd.read()):
-            chunk = chunk.strip()
-            if not chunk:
-                continue
-            if chunk.endswith("."):
-                captions.append(chunk)
-            else:
-                tags.append(chunk)
-    return tags, captions
-
-
 def process_directory(
     dataset_root: Path,
     output_dir: Path,
@@ -255,7 +234,13 @@ def process_directory(
         if "sample-prompts" in file.name:
             skipped_files += 1
             continue
-        tags, captions = load_caption(file)
+        tags = []
+        with open(file, "rt") as fd:
+            for chunk in RE_SEP.split(fd.read()):
+                chunk = chunk.strip()
+                if not chunk:
+                    continue
+                tags.append(chunk)
         orig_tags = tags
 
         # Convert tags to ids, separate implied tags
@@ -288,9 +273,8 @@ def process_directory(
         # Write output
         output_file = output_dir / file.relative_to(dataset_root)
         output_file.parent.mkdir(parents=True, exist_ok=True)
-        result = ", ".join(chain(tags, captions))
         with open(output_file, "wt") as fd:
-            fd.write(result)
+            fd.write(", ".join(tags))
         processed_files += 1
 
     return dict(
@@ -334,7 +318,7 @@ def print_topk(
         ]
         if cat in categories:
            filtered_counter[tag] = count
-        elif "unknown" in categories:
+        elif "unknown" in categories and tag[-1] != ".":
            filtered_counter[tag] = count
 
     for tag, count in filtered_counter.most_common(n):