Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
9c205f3
1 Parent(s): fe9d5f8

Delete loading script

Browse files
Files changed (1) hide show
  1. lex_glue.py +0 -659
lex_glue.py DELETED
@@ -1,659 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """LexGLUE: A Benchmark Dataset for Legal Language Understanding in English."""
16
-
17
- import csv
18
- import json
19
- import textwrap
20
-
21
- import datasets
22
-
23
-
24
- MAIN_CITATION = """\
25
- @article{chalkidis-etal-2021-lexglue,
26
- title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},
27
- author={Chalkidis, Ilias and
28
- Jana, Abhik and
29
- Hartung, Dirk and
30
- Bommarito, Michael and
31
- Androutsopoulos, Ion and
32
- Katz, Daniel Martin and
33
- Aletras, Nikolaos},
34
- year={2021},
35
- eprint={2110.00976},
36
- archivePrefix={arXiv},
37
- primaryClass={cs.CL},
38
- note = {arXiv: 2110.00976},
39
- }"""
40
-
41
- _DESCRIPTION = """\
42
- Legal General Language Understanding Evaluation (LexGLUE) benchmark is
43
- a collection of datasets for evaluating model performance across a diverse set of legal NLU tasks
44
- """
45
-
46
# ECHR article labels for the two ECtHR tasks: nine numbered articles
# plus Article 1 of Protocol 1 ("P1-1").
ECTHR_ARTICLES = [str(number) for number in (2, 3, 5, 6, 8, 9, 10, 11, 14)] + ["P1-1"]
47
-
48
# The 100 EuroVoc concept identifiers used as label classes for the
# `eurlex` task, in ascending order.
EUROVOC_CONCEPTS = (
    "100163 100168 100169 100170 100171 100172 100173 100174 100175 100176 "
    "100177 100179 100180 100183 100184 100185 100186 100187 100189 100190 "
    "100191 100192 100193 100194 100195 100196 100197 100198 100199 100200 "
    "100201 100202 100204 100205 100206 100207 100212 100214 100215 100220 "
    "100221 100222 100223 100224 100226 100227 100229 100230 100231 100232 "
    "100233 100234 100235 100237 100238 100239 100240 100241 100242 100243 "
    "100244 100245 100246 100247 100248 100249 100250 100252 100253 100254 "
    "100255 100256 100257 100258 100259 100260 100261 100262 100263 100264 "
    "100265 100266 100268 100269 100270 100271 100272 100273 100274 100275 "
    "100276 100277 100278 100279 100280 100281 100282 100283 100284 100285"
).split()
150
-
151
# The 100 contract-provision topic labels for the `ledgar` task,
# in their original (alphabetical) order.
LEDGAR_CATEGORIES = (
    "Adjustments;Agreements;Amendments;Anti-Corruption Laws;Applicable Laws;"
    "Approvals;Arbitration;Assignments;Assigns;Authority;Authorizations;"
    "Base Salary;Benefits;Binding Effects;Books;Brokers;Capitalization;"
    "Change In Control;Closings;Compliance With Laws;Confidentiality;"
    "Consent To Jurisdiction;Consents;Construction;Cooperation;Costs;"
    "Counterparts;Death;Defined Terms;Definitions;Disability;Disclosures;"
    "Duties;Effective Dates;Effectiveness;Employment;Enforceability;"
    "Enforcements;Entire Agreements;Erisa;Existence;Expenses;Fees;"
    "Financial Statements;Forfeitures;Further Assurances;General;"
    "Governing Laws;Headings;Indemnifications;Indemnity;Insurances;"
    "Integration;Intellectual Property;Interests;Interpretations;"
    "Jurisdictions;Liens;Litigations;Miscellaneous;Modifications;"
    "No Conflicts;No Defaults;No Waivers;Non-Disparagement;Notices;"
    "Organizations;Participations;Payments;Positions;Powers;Publicity;"
    "Qualifications;Records;Releases;Remedies;Representations;Sales;"
    "Sanctions;Severability;Solvency;Specific Performance;"
    "Submission To Jurisdiction;Subsidiaries;Successors;Survival;"
    "Tax Withholdings;Taxes;Terminations;Terms;Titles;"
    "Transactions With Affiliates;Use Of Proceeds;Vacations;Venues;"
    "Vesting;Waiver Of Jury Trials;Waivers;Warranties;Withholdings"
).split(";")
253
-
254
# Supreme Court Database issue-area codes "1".."13" for the `scotus` task.
SCDB_ISSUE_AREAS = [str(area) for area in range(1, 14)]
255
-
256
# The eight unfair-term categories for the `unfair_tos` task.  The order
# is significant: it defines the ClassLabel integer mapping.
UNFAIR_CATEGORIES = (
    "Limitation of liability;Unilateral termination;Unilateral change;"
    "Content removal;Contract by using;Choice of law;Jurisdiction;Arbitration"
).split(";")
266
-
267
# Answer-choice indices "0".."4" for the five-way `case_hold` task.
CASEHOLD_LABELS = [str(choice) for choice in range(5)]
268
-
269
-
270
class LexGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for LexGLUE."""

    def __init__(
        self,
        text_column,
        label_column,
        url,
        data_url,
        data_file,
        citation,
        label_classes=None,
        multi_label=None,
        dev_column="dev",
        **kwargs,
    ):
        """BuilderConfig for LexGLUE.

        Args:
            text_column: `string`, name of the column in the jsonl file corresponding
                to the text
            label_column: `string`, name of the column in the jsonl file corresponding
                to the label
            url: `string`, url for information about the data set
            data_url: `string`, url to download the zip file from
            data_file: `string`, filename for data set
            citation: `string`, citation for the data set
            label_classes: `list[string]`, the list of classes if the label is
                categorical. If not provided, then the label will be of type
                `datasets.Value('float32')`.
            multi_label: `boolean`, True if the task is multi-label
            dev_column: `string`, name for the development subset
            **kwargs: keyword arguments forwarded to super.
        """
        # Every LexGLUE task shares the same dataset version.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_column = text_column
        self.label_column = label_column
        self.label_classes = label_classes
        self.multi_label = multi_label
        self.dev_column = dev_column
        self.url = url
        self.data_url = data_url
        self.data_file = data_file
        self.citation = citation
315
-
316
-
317
class LexGLUE(datasets.GeneratorBasedBuilder):
    """LexGLUE: A Benchmark Dataset for Legal Language Understanding in English. Version 1.0"""

    BUILDER_CONFIGS = [
        LexGlueConfig(
            name="ecthr_a",
            description=textwrap.dedent(
                """\
                The European Court of Human Rights (ECtHR) hears allegations that a state has
                breached human rights provisions of the European Convention of Human Rights (ECHR).
                For each case, the dataset provides a list of factual paragraphs (facts) from the case description.
                Each case is mapped to articles of the ECHR that were violated (if any)."""
            ),
            text_column="facts",
            label_column="violated_articles",
            label_classes=ECTHR_ARTICLES,
            multi_label=True,
            dev_column="dev",
            url="https://archive.org/details/ECtHR-NAACL2021",
            data_url="https://zenodo.org/record/5532997/files/ecthr.tar.gz",
            data_file="ecthr.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{chalkidis-etal-2021-paragraph,
                title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
                author = "Chalkidis, Ilias and
                  Fergadiotis, Manos and
                  Tsarapatsanis, Dimitrios and
                  Aletras, Nikolaos and
                  Androutsopoulos, Ion and
                  Malakasiotis, Prodromos",
                booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                month = jun,
                year = "2021",
                address = "Online",
                publisher = "Association for Computational Linguistics",
                url = "https://aclanthology.org/2021.naacl-main.22",
                doi = "10.18653/v1/2021.naacl-main.22",
                pages = "226--241",
                }"""
            ),
        ),
        LexGlueConfig(
            name="ecthr_b",
            description=textwrap.dedent(
                """\
                The European Court of Human Rights (ECtHR) hears allegations that a state has
                breached human rights provisions of the European Convention of Human Rights (ECHR).
                For each case, the dataset provides a list of factual paragraphs (facts) from the case description.
                Each case is mapped to articles of ECHR that were allegedly violated (considered by the court)."""
            ),
            text_column="facts",
            label_column="allegedly_violated_articles",
            label_classes=ECTHR_ARTICLES,
            multi_label=True,
            dev_column="dev",
            url="https://archive.org/details/ECtHR-NAACL2021",
            data_url="https://zenodo.org/record/5532997/files/ecthr.tar.gz",
            data_file="ecthr.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{chalkidis-etal-2021-paragraph,
                title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
                author = "Chalkidis, Ilias
                  and Fergadiotis, Manos
                  and Tsarapatsanis, Dimitrios
                  and Aletras, Nikolaos
                  and Androutsopoulos, Ion
                  and Malakasiotis, Prodromos",
                booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                year = "2021",
                address = "Online",
                url = "https://aclanthology.org/2021.naacl-main.22",
                }"""
            ),
        ),
        LexGlueConfig(
            name="eurlex",
            description=textwrap.dedent(
                """\
                European Union (EU) legislation is published in EUR-Lex portal.
                All EU laws are annotated by EU's Publications Office with multiple concepts from the EuroVoc thesaurus,
                a multilingual thesaurus maintained by the Publications Office.
                The current version of EuroVoc contains more than 7k concepts referring to various activities
                of the EU and its Member States (e.g., economics, health-care, trade).
                Given a document, the task is to predict its EuroVoc labels (concepts)."""
            ),
            text_column="text",
            label_column="labels",
            label_classes=EUROVOC_CONCEPTS,
            multi_label=True,
            dev_column="dev",
            url="https://zenodo.org/record/5363165#.YVJOAi8RqaA",
            data_url="https://zenodo.org/record/5532997/files/eurlex.tar.gz",
            data_file="eurlex.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{chalkidis-etal-2021-multieurlex,
                author = {Chalkidis, Ilias and
                  Fergadiotis, Manos and
                  Androutsopoulos, Ion},
                title = {MultiEURLEX -- A multi-lingual and multi-label legal document
                  classification dataset for zero-shot cross-lingual transfer},
                booktitle = {Proceedings of the 2021 Conference on Empirical Methods
                  in Natural Language Processing},
                year = {2021},
                location = {Punta Cana, Dominican Republic},
                }"""
            ),
        ),
        LexGlueConfig(
            name="scotus",
            description=textwrap.dedent(
                """\
                The US Supreme Court (SCOTUS) is the highest federal court in the United States of America
                and generally hears only the most controversial or otherwise complex cases which have not
                been sufficiently well solved by lower courts. This is a single-label multi-class classification
                task, where given a document (court opinion), the task is to predict the relevant issue areas.
                The 14 issue areas cluster 278 issues whose focus is on the subject matter of the controversy (dispute)."""
            ),
            text_column="text",
            label_column="issueArea",
            label_classes=SCDB_ISSUE_AREAS,
            multi_label=False,
            dev_column="dev",
            url="http://scdb.wustl.edu/data.php",
            data_url="https://zenodo.org/record/5532997/files/scotus.tar.gz",
            data_file="scotus.jsonl",
            citation=textwrap.dedent(
                """\
                @misc{spaeth2020,
                author = {Harold J. Spaeth and Lee Epstein and Andrew D. Martin, Jeffrey A. Segal
                  and Theodore J. Ruger and Sara C. Benesh},
                year = {2020},
                title ={{Supreme Court Database, Version 2020 Release 01}},
                url= {http://Supremecourtdatabase.org},
                howpublished={Washington University Law}
                }"""
            ),
        ),
        LexGlueConfig(
            name="ledgar",
            description=textwrap.dedent(
                """\
                LEDGAR dataset aims contract provision (paragraph) classification.
                The contract provisions come from contracts obtained from the US Securities and Exchange Commission (SEC)
                filings, which are publicly available from EDGAR. Each label represents the single main topic
                (theme) of the corresponding contract provision."""
            ),
            text_column="text",
            label_column="clause_type",
            label_classes=LEDGAR_CATEGORIES,
            multi_label=False,
            dev_column="dev",
            url="https://metatext.io/datasets/ledgar",
            data_url="https://zenodo.org/record/5532997/files/ledgar.tar.gz",
            data_file="ledgar.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{tuggener-etal-2020-ledgar,
                title = "{LEDGAR}: A Large-Scale Multi-label Corpus for Text Classification of Legal Provisions in Contracts",
                author = {Tuggener, Don and
                  von D{\"a}niken, Pius and
                  Peetz, Thomas and
                  Cieliebak, Mark},
                booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
                year = "2020",
                address = "Marseille, France",
                url = "https://aclanthology.org/2020.lrec-1.155",
                }"""
            ),
        ),
        LexGlueConfig(
            name="unfair_tos",
            description=textwrap.dedent(
                """\
                The UNFAIR-ToS dataset contains 50 Terms of Service (ToS) from on-line platforms (e.g., YouTube,
                Ebay, Facebook, etc.). The dataset has been annotated on the sentence-level with 8 types of
                unfair contractual terms (sentences), meaning terms that potentially violate user rights
                according to the European consumer law."""
            ),
            text_column="text",
            label_column="labels",
            label_classes=UNFAIR_CATEGORIES,
            multi_label=True,
            # Unlike the other tasks, this archive tags its dev rows "val".
            dev_column="val",
            url="http://claudette.eui.eu",
            data_url="https://zenodo.org/record/5532997/files/unfair_tos.tar.gz",
            data_file="unfair_tos.jsonl",
            citation=textwrap.dedent(
                """\
                @article{lippi-etal-2019-claudette,
                title = "{CLAUDETTE}: an automated detector of potentially unfair clauses in online terms of service",
                author = {Lippi, Marco
                  and Pałka, Przemysław
                  and Contissa, Giuseppe
                  and Lagioia, Francesca
                  and Micklitz, Hans-Wolfgang
                  and Sartor, Giovanni
                  and Torroni, Paolo},
                journal = "Artificial Intelligence and Law",
                year = "2019",
                publisher = "Springer",
                url = "https://doi.org/10.1007/s10506-019-09243-2",
                pages = "117--139",
                }"""
            ),
        ),
        LexGlueConfig(
            name="case_hold",
            description=textwrap.dedent(
                """\
                The CaseHOLD (Case Holdings on Legal Decisions) dataset contains approx. 53k multiple choice
                questions about holdings of US court cases from the Harvard Law Library case law corpus.
                Holdings are short summaries of legal rulings accompany referenced decisions relevant for the present case.
                The input consists of an excerpt (or prompt) from a court decision, containing a reference
                to a particular case, while the holding statement is masked out. The model must identify
                the correct (masked) holding statement from a selection of five choices."""
            ),
            text_column="text",
            label_column="labels",
            label_classes=CASEHOLD_LABELS,
            multi_label=False,
            dev_column="dev",
            url="https://github.com/reglab/casehold",
            data_url="https://zenodo.org/record/5532997/files/casehold.tar.gz",
            data_file="casehold.csv",
            citation=textwrap.dedent(
                """\
                @inproceedings{Zheng2021,
                author = {Lucia Zheng and
                  Neel Guha and
                  Brandon R. Anderson and
                  Peter Henderson and
                  Daniel E. Ho},
                title = {When Does Pretraining Help? Assessing Self-Supervised Learning for
                  Law and the CaseHOLD Dataset},
                year = {2021},
                booktitle = {International Conference on Artificial Intelligence and Law},
                }"""
            ),
        ),
    ]

    def _info(self):
        """Build the ``DatasetInfo`` for the selected config.

        ``case_hold`` exposes multiple-choice features (a context plus five
        candidate endings); the ECtHR tasks expose the facts as a sequence of
        paragraph strings; every other task exposes a single text string.
        Multi-label tasks get a ``labels`` sequence feature, single-label
        tasks a scalar ``label`` class label.
        """
        if self.config.name == "case_hold":
            features = {
                "context": datasets.Value("string"),
                "endings": datasets.features.Sequence(datasets.Value("string")),
            }
        elif "ecthr" in self.config.name:
            features = {"text": datasets.features.Sequence(datasets.Value("string"))}
        else:
            features = {"text": datasets.Value("string")}
        if self.config.multi_label:
            features["labels"] = datasets.features.Sequence(datasets.ClassLabel(names=self.config.label_classes))
        else:
            features["label"] = datasets.ClassLabel(names=self.config.label_classes)
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the task archive and declare the three standard splits.

        Each generator receives a fresh ``iter_archive`` iterator because
        the archive can only be streamed once per iterator.
        """
        archive = dl_manager.download(self.config.data_url)
        # (split name, value of the row's "data_type" field to keep).
        splits = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, self.config.dev_column),
        ]
        return [
            datasets.SplitGenerator(
                name=name,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": self.config.data_file,
                    "split": subset,
                    "files": dl_manager.iter_archive(archive),
                },
            )
            for name, subset in splits
        ]

    def _generate_examples(self, filepath, split, files):
        """This function returns the examples in the raw (text) form.

        ``case_hold`` ships as a single CSV in which the splits are fixed
        row ranges; every other task ships as a jsonl file whose rows carry
        a ``data_type`` field naming their split.
        """
        if self.config.name == "case_hold":
            # Row ranges (start inclusive, end exclusive; row 0 is the
            # header) assigning CSV rows to splits.
            if "dummy" in filepath:
                SPLIT_RANGES = {"train": (1, 3), "dev": (3, 5), "test": (5, 7)}
            else:
                SPLIT_RANGES = {"train": (1, 45001), "dev": (45001, 48901), "test": (48901, 52501)}
            for path, f in files:
                if path == filepath:
                    f = (line.decode("utf-8") for line in f)
                    for id_, row in enumerate(list(csv.reader(f))[SPLIT_RANGES[split][0] : SPLIT_RANGES[split][1]]):
                        yield id_, {
                            "context": row[1],
                            "endings": [row[2], row[3], row[4], row[5], row[6]],
                            "label": str(row[12]),
                        }
                    break
        elif self.config.multi_label:
            # Hoisted out of the loop: the label-class set is loop-invariant.
            label_set = set(self.config.label_classes)
            for path, f in files:
                if path == filepath:
                    for id_, row in enumerate(f):
                        data = json.loads(row.decode("utf-8"))
                        if data["data_type"] == split:
                            # Keep only known classes; sort for determinism.
                            labels = sorted(set(data[self.config.label_column]) & label_set)
                            yield id_, {
                                "text": data[self.config.text_column],
                                "labels": labels,
                            }
                    break
        else:
            for path, f in files:
                if path == filepath:
                    for id_, row in enumerate(f):
                        data = json.loads(row.decode("utf-8"))
                        if data["data_type"] == split:
                            yield id_, {
                                "text": data[self.config.text_column],
                                "label": data[self.config.label_column],
                            }
                    break