gugaio committed
Commit a7a9c9e · 1 Parent(s): e964166

Create notas-fiscais.py

Files changed (1)
  1. notas-fiscais.py +58 -0
notas-fiscais.py ADDED
import json
import datasets

_DESCRIPTION = """\
Dataset with images of payment receipts and notas fiscais (Brazilian invoices).
"""
_URL_JSON = "https://huggingface.co/datasets/gugaime/dokki-pagamentos/raw/main/train.jsonl"
_URL = "https://huggingface.co/datasets/gugaime/dokki-pagamentos/resolve/main/images.zip"


class Dokki(datasets.GeneratorBasedBuilder):

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "id": datasets.Value("string"),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(num_classes=5, names=["O", "cnpj", "cpf", "data", "total"])
                    ),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                }
            ),
            # Token-classification data: no default supervised (input, target) keys.
            supervised_keys=None,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        # Download the image archive and iterate over it without extracting it to disk.
        archive_path = dl_manager.download(_URL)
        image_iters = dl_manager.iter_archive(archive_path)

        # Download the JSONL annotation file (one record per image).
        json_path = dl_manager.download(_URL_JSON)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"images": image_iters, "json_path": json_path},
            )
        ]

    def _generate_examples(self, images, json_path):
        # Index the image bytes by their path inside the archive.
        image_map = {}
        for filepath, image_file in images:
            image_map[filepath] = {"image": {"path": filepath, "bytes": image_file.read()}}

        # Join each annotation record with its image and yield one example per JSONL line.
        with open(json_path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                data = json.loads(line)
                row = image_map[data["imagePath"]]
                row["id"] = data["id"]
                row["ner_tags"] = data["labels"]
                row["tokens"] = data["texts"]
                # Flatten each [[x0, y0], [x1, y1]] pair into a flat [x0, y0, x1, y1] box.
                row["bboxes"] = [
                    [bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]] for bbox in data["bboxes"]
                ]
                yield idx, row
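
For reference, here is the shape of the annotation record that _generate_examples expects to find in train.jsonl, inferred only from the keys the script reads (imagePath, id, labels, texts, bboxes). The values below are illustrative placeholders, not real entries from the dataset:

# Hypothetical JSONL record, reconstructed from the keys read in _generate_examples.
# Whether "labels" holds class names or integer ids is an assumption; ClassLabel accepts both.
example_record = {
    "id": "0001",
    "imagePath": "images/0001.png",  # must match a file path inside images.zip
    "texts": ["CNPJ", "12.345.678/0001-90"],  # one token per entry
    "labels": ["O", "cnpj"],  # one label per token
    "bboxes": [[[10, 20], [80, 40]], [[90, 20], [300, 40]]],  # [[x0, y0], [x1, y1]] per token
}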
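
And a minimal loading sketch, assuming the script is served from the gugaime/dokki-pagamentos repository referenced by the URLs above; depending on the installed version of the datasets library, loading a script-based dataset may require trust_remote_code=True (or may not be supported at all):

from datasets import load_dataset

# Hypothetical call; the repo id is taken from the URLs in the script above.
ds = load_dataset("gugaime/dokki-pagamentos", split="train", trust_remote_code=True)

print(ds.features["ner_tags"].feature.names)  # ['O', 'cnpj', 'cpf', 'data', 'total']
sample = ds[0]
print(sample["id"], sample["tokens"][:5], sample["ner_tags"][:5])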