Datasets:

Languages:
English
Tags:
Not-For-All-Audiences
License:
Gaeros committed on
Commit
a733079
1 Parent(s): 2afc878

normalize: configurable caption files discovery

Browse files
Files changed (2) hide show
  1. normalize.toml +8 -1
  2. normalize_tags.py +18 -6
normalize.toml CHANGED
@@ -1,6 +1,13 @@
1
  # Tag Normalization Configuration
2
 
3
- # Use Underscores: Determines whether to use underscores or spaces in output tags
 
 
 
 
 
 
 
4
  # Default: false (use spaces)
5
  use_underscores = false
6
 
 
1
  # Tag Normalization Configuration
2
 
3
+ # Include Filename Regexp: Only files matching this regexp will be treated as
4
+ # captions. This is a full-match regexp that must match the entire filename.
5
+ # Default: r".*?\.(txt|cap.*)$"
6
+ include_filename_regexp = ".*?\\.(txt|cap.*)$"
7
+
8
+ # Exclude Filename Regexp: Files matching this regexp will be excluded.
9
+ # Default: ".*samples?-prompts?.*"
10
+ exclude_filename_regexp = ".*samples?-prompts?.*"
11
  # Default: false (use spaces)
12
  use_underscores = false
13
 
normalize_tags.py CHANGED
@@ -221,6 +221,9 @@ def process_directory(
221
  if isinstance(keep_implied, list):
222
  encode = tagset_normalizer.tag_normalizer.encode
223
  keep_implied = {encode(t, t) for t in keep_implied}
 
 
 
224
 
225
  # Running stats
226
  counter = Counter()
@@ -229,12 +232,7 @@ def process_directory(
229
  skipped_files = 0
230
  blacklist_instances = 0
231
  implied_instances = 0
232
-
233
- files = [*dataset_root.glob("**/*.txt"), *dataset_root.glob("**/*.cap*")]
234
  for file in tqdm(files):
235
- if "sample-prompts" in file.name:
236
- skipped_files += 1
237
- continue
238
  tags = []
239
  with open(file, "rt") as fd:
240
  for chunk in RE_SEP.split(fd.read()):
@@ -288,6 +286,21 @@ def process_directory(
288
  )
289
 
290
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291
  def print_topk(
292
  counter: Counter,
293
  tagset_normalizer: TagSetNormalizer,
@@ -465,7 +478,6 @@ def main():
465
  )
466
  logger.info(f"Blacklist size: {len(blacklist)} tags")
467
 
468
- logger.info("🔍 Processing files...")
469
  start_time = time.time()
470
  stats = process_directory(
471
  input_dir,
 
221
  if isinstance(keep_implied, list):
222
  encode = tagset_normalizer.tag_normalizer.encode
223
  keep_implied = {encode(t, t) for t in keep_implied}
224
+ logger.debug(f"🔍 Gathering file list...")
225
+ files = walk_directory(dataset_root, config)
226
+ logger.info("💾 Processing %d files...", len(files))
227
 
228
  # Running stats
229
  counter = Counter()
 
232
  skipped_files = 0
233
  blacklist_instances = 0
234
  implied_instances = 0
 
 
235
  for file in tqdm(files):
 
 
 
236
  tags = []
237
  with open(file, "rt") as fd:
238
  for chunk in RE_SEP.split(fd.read()):
 
286
  )
287
 
288
 
289
+ def walk_directory(dataset_root: Path, config: dict):
290
+ exclude_re = re.compile(
291
+ config.get("exclude_filename_regexp", r".*samples?-prompts?.*")
292
+ )
293
+ include_re = re.compile(config.get("include_filename_regexp", r".*?\.(txt|cap.*)$"))
294
+ res = []
295
+ for root, dirs, files in dataset_root.walk(follow_symlinks=True):
296
+ dirs[:] = [d for d in dirs if not d.startswith(".")]
297
+ for file in files:
298
+ if not include_re.fullmatch(file) or exclude_re.fullmatch(file):
299
+ continue
300
+ res.append(root / file)
301
+ return res
302
+
303
+
304
  def print_topk(
305
  counter: Counter,
306
  tagset_normalizer: TagSetNormalizer,
 
478
  )
479
  logger.info(f"Blacklist size: {len(blacklist)} tags")
480
 
 
481
  start_time = time.time()
482
  stats = process_directory(
483
  input_dir,