voorhs committed on
Commit 5b25b42 · verified · 1 Parent(s): abc8eba

Update README.md

Files changed (1)
  1. README.md +94 -0

README.md CHANGED
@@ -52,3 +52,97 @@ configs:
  - split: intents
    path: intents/intents-*
---
# clinc150

This is a text classification dataset. It is intended for machine learning research and experimentation.

This dataset was obtained by reformatting another publicly available dataset to be compatible with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html).
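In the AutoIntent format, each utterance is stored as a sample with an optional integer intent label (out-of-scope utterances carry no label), while intent metadata lives in a separate `intents` split. Below is a minimal sketch using the `autointent.schemas` classes that also appear in the conversion script further down; the field values are purely illustrative:

```python
from autointent.schemas import Intent, Sample

# In-domain utterance: text plus an integer intent id (illustrative values)
sample = Sample(utterance="what is the weather like today", label=42)

# Out-of-scope utterance: stored without a label
oos_sample = Sample(utterance="tell me something random")

# Intent metadata: the id must match the labels assigned to samples
intent = Intent(id=42, name="weather")
```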
## Usage

This dataset is intended to be used with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
```python
from autointent import Dataset

# load the AutoIntent-formatted dataset from the Hugging Face Hub
clinc150 = Dataset.from_datasets("AutoIntent/clinc150")
```
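Since the formatted data is hosted as a regular Hugging Face dataset, the raw splits can also be inspected with the plain `datasets` library. This is a minimal sketch, assuming the split names defined in the `configs` section of this card and produced by the conversion script below:

```python
from datasets import load_dataset

# utterance split: each row pairs an utterance with its intent label
# (out-of-scope samples carry no label)
train = load_dataset("AutoIntent/clinc150", split="train")

# intent metadata is stored in a separate "intents" split
intents = load_dataset("AutoIntent/clinc150", split="intents")

print(train[0])
print(intents[0])
```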
## Source

This dataset was taken from `cmaldona/All-Generalization-OOD-CLINC150` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
```python
# define util
"""Convert the clinc150 dataset to the AutoIntent internal format and scheme."""

from datasets import Dataset as HFDataset
from datasets import load_dataset

from autointent import Dataset
from autointent.schemas import Intent, Sample

def extract_intents_data(
    clinc150_split: HFDataset, oos_intent_name: str = "ood"
) -> tuple[list[Intent], dict[str, int]]:
    """Extract intent names and assign ids to them."""
    intent_names = sorted(clinc150_split.unique("labels"))
    oos_intent_id = intent_names.index(oos_intent_name)
    intent_names.pop(oos_intent_id)

    n_classes = len(intent_names)
    assert n_classes == 150  # noqa: PLR2004, S101

    name_to_id = dict(zip(intent_names, range(n_classes), strict=False))
    intents_data = [Intent(id=i, name=name) for name, i in name_to_id.items()]
    return intents_data, name_to_id


def convert_clinc150(
    clinc150_split: HFDataset,
    name_to_id: dict[str, int],
    shots_per_intent: int | None = None,
    oos_intent_name: str = "ood",
) -> list[Sample]:
    """Convert one split into desired format."""
    oos_samples = []
    classwise_samples = [[] for _ in range(len(name_to_id))]
    n_unrecognized_labels = 0

    for batch in clinc150_split.iter(batch_size=16, drop_last_batch=False):
        for txt, name in zip(batch["data"], batch["labels"], strict=False):
            if name == oos_intent_name:
                # out-of-scope utterances are stored without a label
                oos_samples.append(Sample(utterance=txt))
                continue
            intent_id = name_to_id.get(name, None)
            if intent_id is None:
                n_unrecognized_labels += 1
                continue
            target_list = classwise_samples[intent_id]
            # skip the utterance once the optional per-class cap is reached
            if shots_per_intent is not None and len(target_list) >= shots_per_intent:
                continue
            target_list.append(Sample(utterance=txt, label=intent_id))

    in_domain_samples = [
        sample
        for samples_from_single_class in classwise_samples
        for sample in samples_from_single_class
    ]

    print(f"{len(in_domain_samples)=}")
    print(f"{len(oos_samples)=}")
    print(f"{n_unrecognized_labels=}\n")

    return in_domain_samples + oos_samples


if __name__ == "__main__":
    clinc150 = load_dataset("cmaldona/All-Generalization-OOD-CLINC150")

    intents_data, name_to_id = extract_intents_data(clinc150["train"])

    train_samples = convert_clinc150(clinc150["train"], name_to_id)
    validation_samples = convert_clinc150(clinc150["validation"], name_to_id)
    test_samples = convert_clinc150(clinc150["test"], name_to_id)

    clinc150_converted = Dataset.from_dict(
        {"train": train_samples, "validation": validation_samples, "test": test_samples, "intents": intents_data}
    )
```
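Note that `convert_clinc150` accepts an optional `shots_per_intent` argument that caps how many utterances are kept per class, which can be handy for building few-shot subsets; the conversion above leaves it unset, so all utterances are retained.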