Datasets:

Languages:
English
ArXiv:
License:
jdrechsel commited on
Commit
d0e4f47
·
verified ·
1 Parent(s): 3e61a26

Upload genter.py

Browse files
Files changed (1) hide show
  1. genter.py +92 -0
genter.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+
3
+ from datasets import DatasetInfo, GeneratorBasedBuilder, Split, SplitGenerator, load_dataset, Features, Value
4
+
5
+
6
class GenterDataset(GeneratorBasedBuilder):
    """Builder for GENTER: template sentences linking first names to gendered pronouns.

    Entries are selected from BookCorpus via per-split index CSV files hosted in the
    aieng-lab/genter repository. Each selected sentence is additionally masked so the
    first name becomes ``[NAME]`` and its third-person singular pronoun becomes
    ``[PRONOUN]``.
    """

    _CITATION = """
    @misc{drechsel2025gradiendmonosemanticfeaturelearning,
          title={{GRADIEND}: Monosemantic Feature Learning within Neural Networks Applied to Gender Debiasing of Transformer Models},
          author={Jonathan Drechsel and Steffen Herbold},
          year={2025},
          eprint={2502.01406},
          archivePrefix={arXiv},
          primaryClass={cs.LG},
          url={https://arxiv.org/abs/2502.01406},
    }
    """

    def _info(self):
        """Return the dataset metadata (description, feature schema, citation)."""
        return DatasetInfo(
            description="This dataset consists of template sentences associating first names ([NAME]) with third-person singular pronouns ([PRONOUN])",
            features=Features({
                "index": Value("int32"),        # row index into the BookCorpus train split
                "text": Value("string"),        # original BookCorpus sentence
                "masked": Value("string"),      # sentence with [NAME] / [PRONOUN] placeholders
                "label": Value("string"),       # 'M' (he) or 'F' (she)
                "name": Value("string"),        # the first name that was masked
                "pronoun": Value("string"),     # 'he' or 'she'
                "pronoun_count": Value("uint8"),  # occurrences of the pronoun in `text`
            }),
            supervised_keys=None,
            citation=self._CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split index files and load BookCorpus once for all splits.

        Args:
            dl_manager: the `datasets` download manager used to fetch the index CSVs.

        Returns:
            One SplitGenerator per train/validation/test split, each carrying its
            index file path and a shared handle to the BookCorpus train split.
        """
        # Per-split index files hosted on the Hugging Face Hub.
        indices_files = {}
        for split in ["train", "val", "test"]:
            index_file_url = f"https://huggingface.co/datasets/aieng-lab/genter/resolve/main/genter_indices_{split}.csv"
            indices_files[split] = dl_manager.download_and_extract(index_file_url)

        # Load BookCorpus eagerly here so the (large) load happens once and the
        # same handle is shared by all three split generators.
        print("Loading BookCorpus dataset...")
        bookcorpus = load_dataset('bookcorpus', trust_remote_code=True)['train']
        print("BookCorpus dataset loaded.")

        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"indices_file": indices_files['train'], "bookcorpus": bookcorpus}),
            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"indices_file": indices_files['val'], "bookcorpus": bookcorpus}),
            SplitGenerator(name=Split.TEST, gen_kwargs={"indices_file": indices_files['test'], "bookcorpus": bookcorpus}),
        ]

    def _generate_examples(self, indices_file: str, bookcorpus):
        """Yield examples by selecting BookCorpus rows listed in the index CSV.

        Args:
            indices_file: local path of the downloaded index CSV; must contain
                `index`, `name`, and `pronoun` columns.
            bookcorpus: the BookCorpus train split, indexable by integer row.

        Yields:
            (key, example) pairs matching the feature schema declared in `_info`.

        Raises:
            ImportError: if pandas is not installed.
            ValueError: if an index row contains a pronoun other than 'he'/'she'.
        """
        # Keep the try narrow: only the import should be reported as a missing
        # dependency — a CSV parse error must not masquerade as an ImportError.
        try:
            import pandas as pd
        except ImportError as err:
            raise ImportError("Please install pandas to generate GENTER.") from err

        df = pd.read_csv(indices_file)

        # Filter BookCorpus based on indices and yield examples.
        for _, sample in df.iterrows():
            idx = int(sample['index'])  # cast numpy.int64 -> int for a clean example key
            name = sample['name']
            pronoun = sample['pronoun']
            # Explicit validation instead of `assert` (asserts are stripped under -O).
            if pronoun not in {'he', 'she'}:
                raise ValueError(f"Unexpected pronoun {pronoun!r} for index {idx}")
            # BUGFIX: the mapping was inverted ('she' yielded 'M'); 'he' is male.
            label = 'M' if pronoun == 'he' else 'F'

            text = bookcorpus[idx]['text']

            # Escape the name: a first name containing a regex metacharacter
            # (e.g. an apostrophe variant or period) must be matched literally.
            masked = re.sub(rf'\b{re.escape(name)}\b', '[NAME]', text)

            pronoun_count = len(re.findall(rf'\b{pronoun}\b', text))
            masked = re.sub(rf'\b{pronoun}\b', '[PRONOUN]', masked)

            yield idx, {
                "index": idx,
                "text": text,
                "masked": masked,
                "label": label,
                "name": name,
                "pronoun": pronoun,
                "pronoun_count": pronoun_count,
            }