Datasets: persius/hicric

Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
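The script below builds the hub dataset from a `processed_sources.jsonl` metadata file, where each line is a JSON object describing one processed source file. Judging from the fields the script reads, a metadata record has at least the shape sketched here; every value in this example is a hypothetical placeholder, not a real HICRIC entry.

# Hypothetical shape of one line in processed_sources.jsonl, inferred from the
# fields accessed in the script below; all values are illustrative placeholders.
example_metadata_record = {
    "local_processed_path": "data/processed/example_source.jsonl",  # JSONL file with "text" (and optional "tags") per line
    "tags": ["case-description"],  # must include exactly one partitioning category
    "date_accessed": "2024-01-01",
    "url": "https://example.com/source-document",
    "md5": "0123456789abcdef0123456789abcdef",
}
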
import json
from datasets import Dataset, DatasetDict
from util import PARTITIONING_CATS


def construct_hf_dataset(metadata_file: str = "processed_sources.jsonl"):
    """Construct a HF DatasetDict class from the HICRIC processed data dir, and push to hub."""

    def data_generator(cat: str):
        def validate_tags(tags, partitioning_cats=PARTITIONING_CATS):
            # Collect the tags that are also partitioning categories
            matches = [tag for tag in tags if tag in partitioning_cats]

            # Each file must carry exactly one partitioning category
            if len(matches) != 1:
                raise ValueError(
                    f"The list of tags must contain exactly one key from the partitioning categories: {partitioning_cats}."
                )

            return True  # If the tags are valid

        # Open metadata file
        with open(metadata_file, "r") as metadata_f:
            for idx, line in enumerate(metadata_f):
                obj = json.loads(line)
                local_processed_path = obj["local_processed_path"]
                file_tags = obj["tags"]
                date_accessed = obj["date_accessed"]
                url = obj["url"]
                raw_md5 = obj["md5"]

                # validate_tags raises if the file is not tagged with exactly one
                # partitioning category; skip files that belong to other partitions
                validate_tags(file_tags)
                if cat not in file_tags:
                    continue

                # Read the JSONL file pointed to by the `local_processed_path`
                with open(local_processed_path, "r") as data_file:
                    for _idx, data_line in enumerate(data_file):
                        data_obj = json.loads(data_line, strict=False)

                        # Get line-specific data; skip records with no text
                        text = data_obj.get("text", "")
                        line_tags = data_obj.get("tags", [])
                        if len(text) == 0:
                            continue

                        # Combine file-level tags with any line-level tags
                        tags = file_tags + line_tags

                        rec = {
                            "text": text,
                            "tags": tags,
                            "date_accessed": date_accessed,
                            "source_url": url,
                            "source_md5": raw_md5,
                            "relative_path": local_processed_path,
                        }

                        # Add partition-specific fields
                        if cat == "case-description":
                            rec["decision"] = data_obj.get("decision", "unknown")
                            rec["appeal_type"] = data_obj.get("appeal_type", "unknown")

                        yield rec

    # Create a DatasetDict holding one sub-dataset per partitioning category
    dataset_dict = DatasetDict()
    for cat in PARTITIONING_CATS:
        sub_dataset = Dataset.from_generator(
            generator=data_generator, gen_kwargs={"cat": cat}
        )
        dataset_dict[cat] = sub_dataset

    # Push each category's dataset to the hub as a separate config
    for k, v in dataset_dict.items():
        v.push_to_hub("persius/hicric", k, private=True)
    # dataset_dict.save_to_disk("./arrow_data")

    return None


if __name__ == "__main__":
    construct_hf_dataset()
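
Once pushed, each partitioning category becomes a named config of the private persius/hicric dataset on the Hub. A minimal sketch of loading one partition back with the `datasets` library, assuming you are authenticated for the private repo and using "case-description" (the one category the script names explicitly) as the config:

from datasets import load_dataset

# Load one partition (config) of the private hub dataset; other config names
# come from PARTITIONING_CATS. Requires a Hugging Face token with read access.
ds = load_dataset("persius/hicric", "case-description", split="train", token=True)
print(ds[0]["text"][:200])
print(ds[0]["tags"])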