shunk031 committed on
Commit
dc37d9b
1 Parent(s): c109b40

Initialize (#1)

Browse files

* [WIP] add files

* update Rico.py

* update

* update

* update

* update

* update README

* add settings for CI

* update README.md

* update README

* update

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+ paths-ignore:
9
+ - "README.md"
10
+
11
+ jobs:
12
+ test:
13
+ runs-on: ubuntu-latest
14
+ strategy:
15
+ matrix:
16
+ python-version: ["3.9", "3.10"]
17
+
18
+ steps:
19
+ - uses: actions/checkout@v3
20
+
21
+ - name: Set up Python ${{ matrix.python-version }}
22
+ uses: actions/setup-python@v4
23
+ with:
24
+ python-version: ${{ matrix.python-version }}
25
+
26
+ - name: Install dependencies
27
+ run: |
28
+ pip install -U pip setuptools wheel poetry
29
+ poetry install
30
+
31
+ - name: Format
32
+ run: |
33
+ poetry run black --check .
34
+
35
+ - name: Lint
36
+ run: |
37
+ poetry run ruff .
38
+
39
+ - name: Type check
40
+ run: |
41
+ poetry run mypy . \
42
+ --ignore-missing-imports \
43
+ --no-strict-optional \
44
+ --no-site-packages \
45
+ --cache-dir=/dev/null
46
+
47
+ - name: Run tests
48
+ run: |
49
+ poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face Hub
2
+
3
+ on:
4
+ workflow_run:
5
+ workflows:
6
+ - CI
7
+ branches:
8
+ - main
9
+ types:
10
+ - completed
11
+
12
+ jobs:
13
+ push_to_hub:
14
+ runs-on: ubuntu-latest
15
+
16
+ steps:
17
+ - name: Checkout repository
18
+ uses: actions/checkout@v3
19
+
20
+ - name: Push to Huggingface hub
21
+ env:
22
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
23
+ HF_USERNAME: ${{ secrets.HF_USERNAME }}
24
+ run: |
25
+ git fetch --unshallow
26
+ git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/Rico main
.gitignore ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by https://www.toptal.com/developers/gitignore/api/python
2
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python
3
+
4
+ ### Python ###
5
+ # Byte-compiled / optimized / DLL files
6
+ __pycache__/
7
+ *.py[cod]
8
+ *$py.class
9
+
10
+ # C extensions
11
+ *.so
12
+
13
+ # Distribution / packaging
14
+ .Python
15
+ build/
16
+ develop-eggs/
17
+ dist/
18
+ downloads/
19
+ eggs/
20
+ .eggs/
21
+ lib/
22
+ lib64/
23
+ parts/
24
+ sdist/
25
+ var/
26
+ wheels/
27
+ share/python-wheels/
28
+ *.egg-info/
29
+ .installed.cfg
30
+ *.egg
31
+ MANIFEST
32
+
33
+ # PyInstaller
34
+ # Usually these files are written by a python script from a template
35
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
36
+ *.manifest
37
+ *.spec
38
+
39
+ # Installer logs
40
+ pip-log.txt
41
+ pip-delete-this-directory.txt
42
+
43
+ # Unit test / coverage reports
44
+ htmlcov/
45
+ .tox/
46
+ .nox/
47
+ .coverage
48
+ .coverage.*
49
+ .cache
50
+ nosetests.xml
51
+ coverage.xml
52
+ *.cover
53
+ *.py,cover
54
+ .hypothesis/
55
+ .pytest_cache/
56
+ cover/
57
+
58
+ # Translations
59
+ *.mo
60
+ *.pot
61
+
62
+ # Django stuff:
63
+ *.log
64
+ local_settings.py
65
+ db.sqlite3
66
+ db.sqlite3-journal
67
+
68
+ # Flask stuff:
69
+ instance/
70
+ .webassets-cache
71
+
72
+ # Scrapy stuff:
73
+ .scrapy
74
+
75
+ # Sphinx documentation
76
+ docs/_build/
77
+
78
+ # PyBuilder
79
+ .pybuilder/
80
+ target/
81
+
82
+ # Jupyter Notebook
83
+ .ipynb_checkpoints
84
+
85
+ # IPython
86
+ profile_default/
87
+ ipython_config.py
88
+
89
+ # pyenv
90
+ # For a library or package, you might want to ignore these files since the code is
91
+ # intended to run in multiple environments; otherwise, check them in:
92
+ .python-version
93
+
94
+ # pipenv
95
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
97
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
98
+ # install all needed dependencies.
99
+ #Pipfile.lock
100
+
101
+ # poetry
102
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
103
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
104
+ # commonly ignored for libraries.
105
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
106
+ #poetry.lock
107
+
108
+ # pdm
109
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
110
+ #pdm.lock
111
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
112
+ # in version control.
113
+ # https://pdm.fming.dev/#use-with-ide
114
+ .pdm.toml
115
+
116
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
117
+ __pypackages__/
118
+
119
+ # Celery stuff
120
+ celerybeat-schedule
121
+ celerybeat.pid
122
+
123
+ # SageMath parsed files
124
+ *.sage.py
125
+
126
+ # Environments
127
+ .env
128
+ .venv
129
+ env/
130
+ venv/
131
+ ENV/
132
+ env.bak/
133
+ venv.bak/
134
+
135
+ # Spyder project settings
136
+ .spyderproject
137
+ .spyproject
138
+
139
+ # Rope project settings
140
+ .ropeproject
141
+
142
+ # mkdocs documentation
143
+ /site
144
+
145
+ # mypy
146
+ .mypy_cache/
147
+ .dmypy.json
148
+ dmypy.json
149
+
150
+ # Pyre type checker
151
+ .pyre/
152
+
153
+ # pytype static type analyzer
154
+ .pytype/
155
+
156
+ # Cython debug symbols
157
+ cython_debug/
158
+
159
+ # PyCharm
160
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
161
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
162
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
163
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
164
+ #.idea/
165
+
166
+ ### Python Patch ###
167
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
168
+ poetry.toml
169
+
170
+ # ruff
171
+ .ruff_cache/
172
+
173
+ # LSP config files
174
+ pyrightconfig.json
175
+
176
+ # End of https://www.toptal.com/developers/gitignore/api/python
README.md ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language:
5
+ - en
6
+ language_creators:
7
+ - found
8
+ license:
9
+ - unknown
10
+ multilinguality:
11
+ - monolingual
12
+ pretty_name: Rico
13
+ size_categories: []
14
+ source_datasets:
15
+ - original
16
+ tags:
17
+ - graphic design
18
+ task_categories:
19
+ - other
20
+ task_ids: []
21
+ ---
22
+
23
+ # Dataset Card for Rico
24
+
25
+ [![CI](https://github.com/shunk031/huggingface-datasets_Rico/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_Rico/actions/workflows/ci.yaml)
26
+
27
+ ## Table of Contents
28
+ - [Dataset Card Creation Guide](#dataset-card-creation-guide)
29
+ - [Table of Contents](#table-of-contents)
30
+ - [Dataset Description](#dataset-description)
31
+ - [Dataset Summary](#dataset-summary)
32
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
33
+ - [Languages](#languages)
34
+ - [Dataset Structure](#dataset-structure)
35
+ - [Data Instances](#data-instances)
36
+ - [Data Fields](#data-fields)
37
+ - [Data Splits](#data-splits)
38
+ - [Dataset Creation](#dataset-creation)
39
+ - [Curation Rationale](#curation-rationale)
40
+ - [Source Data](#source-data)
41
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
42
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
43
+ - [Annotations](#annotations)
44
+ - [Annotation process](#annotation-process)
45
+ - [Who are the annotators?](#who-are-the-annotators)
46
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
47
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
48
+ - [Social Impact of Dataset](#social-impact-of-dataset)
49
+ - [Discussion of Biases](#discussion-of-biases)
50
+ - [Other Known Limitations](#other-known-limitations)
51
+ - [Additional Information](#additional-information)
52
+ - [Dataset Curators](#dataset-curators)
53
+ - [Licensing Information](#licensing-information)
54
+ - [Citation Information](#citation-information)
55
+ - [Contributions](#contributions)
56
+
57
+ ## Dataset Description
58
+
59
+ - **Homepage:** http://www.interactionmining.org/rico.html
60
+ - **Repository:** https://github.com/shunk031/huggingface-datasets_Rico
61
+ - **Paper (UIST2017):** https://dl.acm.org/doi/10.1145/3126594.3126651
62
+
63
+ ### Dataset Summary
64
+
65
+ Rico: A Mobile App Dataset for Building Data-Driven Design Applications
66
+
67
+ ### Supported Tasks and Leaderboards
68
+
69
+ [More Information Needed]
70
+
71
+ ### Languages
72
+
73
+ [More Information Needed]
74
+
75
+ ## Dataset Structure
76
+
77
+ ### Data Instances
78
+
79
+ - UI screenshots and view hierarchies
80
+
81
+ ```python
82
+ import datasets as ds
83
+
84
+ dataset = ds.load_dataset(
85
+ path="shunk031/Rico",
86
+ name="ui-screenshots-and-view-hierarchies",
87
+ )
88
+ ```
89
+
90
+ - UI metadata
91
+
92
+ ```python
93
+ import datasets as ds
94
+
95
+ dataset = ds.load_dataset(
96
+ path="shunk031/Rico",
97
+ name="ui-metadata",
98
+ )
99
+ ```
100
+
101
+ - UI layout vectors
102
+
103
+ ```python
104
+ import datasets as ds
105
+
106
+ dataset = ds.load_dataset(
107
+ path="shunk031/Rico",
108
+ name="ui-layout-vectors",
109
+ )
110
+ ```
111
+
112
+ - Interaction traces
113
+
114
+ ```python
115
+ import datasets as ds
116
+
117
+ dataset = ds.load_dataset(
118
+ path="shunk031/Rico",
119
+ name="interaction-traces",
120
+ )
121
+ ```
122
+
123
+ - [WIP] Animations
124
+
125
+ ```python
126
+ import datasets as ds
127
+
128
+ dataset = ds.load_dataset(
129
+ path="shunk031/Rico",
130
+ name="animations",
131
+ )
132
+ ```
133
+
134
+ - Play store metadata
135
+
136
+ ```python
137
+ import datasets as ds
138
+
139
+ dataset = ds.load_dataset(
140
+ path="shunk031/Rico",
141
+ name="play-store-metadata",
142
+ )
143
+ ```
144
+
145
+ - UI screenshots and hierarchies with semantic annotations
146
+
147
+ ```python
148
+ import datasets as ds
149
+
150
+ dataset = ds.load_dataset(
151
+ path="shunk031/Rico",
152
+ name="ui-screenshots-and-hierarchies-with-semantic-annotations",
153
+ )
154
+ ```
155
+
156
+ ### Data Fields
157
+
158
+ [More Information Needed]
159
+
160
+ ### Data Splits
161
+
162
+ [More Information Needed]
163
+
164
+ ## Dataset Creation
165
+
166
+ ### Curation Rationale
167
+
168
+ [More Information Needed]
169
+
170
+ ### Source Data
171
+
172
+ [More Information Needed]
173
+
174
+ #### Initial Data Collection and Normalization
175
+
176
+ [More Information Needed]
177
+
178
+ #### Who are the source language producers?
179
+
180
+ [More Information Needed]
181
+
182
+ ### Annotations
183
+
184
+ [More Information Needed]
185
+
186
+ #### Annotation process
187
+
188
+ [More Information Needed]
189
+
190
+ #### Who are the annotators?
191
+
192
+ [More Information Needed]
193
+
194
+ ### Personal and Sensitive Information
195
+
196
+ [More Information Needed]
197
+
198
+ ## Considerations for Using the Data
199
+
200
+ ### Social Impact of Dataset
201
+
202
+ [More Information Needed]
203
+
204
+ ### Discussion of Biases
205
+
206
+ [More Information Needed]
207
+
208
+ ### Other Known Limitations
209
+
210
+ [More Information Needed]
211
+
212
+ ## Additional Information
213
+
214
+ ### Dataset Curators
215
+
216
+ [More Information Needed]
217
+
218
+ ### Licensing Information
219
+
220
+ [More Information Needed]
221
+
222
+ ### Citation Information
223
+
224
+ ```bibtex
225
+ @inproceedings{deka2017rico,
226
+ title={Rico: A mobile app dataset for building data-driven design applications},
227
+ author={Deka, Biplab and Huang, Zifeng and Franzen, Chad and Hibschman, Joshua and Afergan, Daniel and Li, Yang and Nichols, Jeffrey and Kumar, Ranjitha},
228
+ booktitle={Proceedings of the 30th annual ACM symposium on user interface software and technology},
229
+ pages={845--854},
230
+ year={2017}
231
+ }
232
+ ```
233
+
234
+ ### Contributions
235
+
236
+ Thanks to [DATA DRIVEN DESIGN GROUP UNIVERSITY OF ILLINOIS AT URBANA-CHAMPAIGN](http://ranjithakumar.net/) for creating this dataset.
Rico.py ADDED
@@ -0,0 +1,810 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import json
3
+ import math
4
+ import pathlib
5
+ import re
6
+ from collections import defaultdict
7
+ from dataclasses import asdict, dataclass
8
+ from typing import Any, Dict, List, Optional, Tuple, TypedDict
9
+
10
+ import datasets as ds
11
+ import numpy as np
12
+ import pandas as pd
13
+ from datasets.utils.logging import get_logger
14
+ from PIL import Image
15
+ from PIL.Image import Image as PilImage
16
+
17
+ logger = get_logger(__name__)
18
+
19
+ JsonDict = Dict[str, Any]
20
+
21
+ _DESCRIPTION = """
22
+ THE DATASET: We mined over 9.3k free Android apps from 27 categories to create the Rico dataset. Apps in the dataset had an average user rating of 4.1. The Rico dataset contains visual, textual, structural, and interactive design properties of more than 66k unique UI screens and 3M UI elements.
23
+ """
24
+
25
+ _CITATION = """\
26
+ @inproceedings{deka2017rico,
27
+ title={Rico: A mobile app dataset for building data-driven design applications},
28
+ author={Deka, Biplab and Huang, Zifeng and Franzen, Chad and Hibschman, Joshua and Afergan, Daniel and Li, Yang and Nichols, Jeffrey and Kumar, Ranjitha},
29
+ booktitle={Proceedings of the 30th annual ACM symposium on user interface software and technology},
30
+ pages={845--854},
31
+ year={2017}
32
+ }
33
+ """
34
+
35
+ _HOMEPAGE = "http://www.interactionmining.org/rico.html"
36
+
37
+ _LICENSE = "Unknown"
38
+
39
+
40
def to_snake_case(name):
    """Convert a camelCase/PascalCase identifier to snake_case.

    Applies the three classic camel-to-snake regex passes in order and
    lowercases the result (e.g. ``"componentLabel"`` -> ``"component_label"``).
    """
    passes = (
        (r"(.)([A-Z][a-z]+)", r"\1_\2"),
        (r"__([A-Z])", r"_\1"),
        (r"([a-z0-9])([A-Z])", r"\1_\2"),
    )
    converted = name
    for pattern, replacement in passes:
        converted = re.sub(pattern, replacement, converted)
    return converted.lower()
45
+
46
+
47
class TrainValidationTestSplit(TypedDict):
    """Examples partitioned into the three standard dataset splits."""

    # Each value is the list of raw examples assigned to that split.
    train: List[Any]
    validation: List[Any]
    test: List[Any]
51
+
52
+
53
class UiLayoutVectorSample(TypedDict):
    """One ``ui-layout-vectors`` entry: a layout vector plus its UI name."""

    vector: np.ndarray  # one 64-dim row of ui_vectors.npy (see the shape assert in the processor)
    name: str  # the matching entry from ui_names.json
56
+
57
+
58
@dataclass(eq=True)
class RicoProcessor(object, metaclass=abc.ABCMeta):
    """Abstract interface shared by every Rico sub-dataset processor.

    Concrete subclasses declare the `datasets` feature schema, load the raw
    examples, build the split generators, and yield individual examples.
    """

    @abc.abstractmethod
    def get_features(self) -> ds.Features:
        # Return the `datasets.Features` schema for this configuration.
        raise NotImplementedError

    @abc.abstractmethod
    def load_examples(self, *args, **kwargs) -> List[Any]:
        # Load the raw examples for this configuration from disk.
        raise NotImplementedError

    @abc.abstractmethod
    def split_generators(self, *args, **kwargs) -> List[ds.SplitGenerator]:
        # Build the `datasets.SplitGenerator` list for this configuration.
        raise NotImplementedError

    @abc.abstractmethod
    def generate_examples(self, examples: List[Any]):
        # Yield (key, example) pairs for the given raw examples.
        raise NotImplementedError
75
+
76
+
77
class RicoTaskProcessor(RicoProcessor, metaclass=abc.ABCMeta):
    """Base class for Rico tasks whose raw data lives under an extracted directory.

    Provides shared helpers for flattening nested view hierarchies, loading
    images/JSON, and ratio-based train/validation/test splitting.
    """

    def _flatten_children(
        self,
        children,
        children_id: Optional[int] = None,
        result: Optional[Dict[str, Any]] = None,
    ):
        """Recursively group nested "children" nodes by depth.

        Returns a dict mapping ``"children_<depth>"`` to the list of nodes seen
        at that depth.  Each visited node's own "children" key is popped, so the
        input tree is mutated and the recorded nodes no longer nest.
        """
        result = result or defaultdict(list)
        if children is None:
            return result

        # Depth of the current level; the initial call starts at depth 0.
        children_id = children_id or 0

        for child in children:
            # Skip null/empty child entries.
            if not child:
                continue

            # NOTE(review): nodes WITHOUT a "children" key are skipped entirely
            # rather than recorded at the current depth — confirm intended.
            if "children" not in child:
                continue

            # Recurse into the grandchildren first, then record this node
            # (now stripped of its "children" key) at the current depth.
            result = self._flatten_children(
                children=child.pop("children"),
                children_id=children_id + 1,
                result=result,
            )
            assert result is not None
            result[f"children_{children_id}"].append(child)

        return result

    def _load_image(self, file_path: pathlib.Path) -> PilImage:
        """Open an image file with PIL (lazy; pixel data read on demand)."""
        logger.debug(f"Load from {file_path}")
        return Image.open(file_path)

    def _load_json(self, file_path: pathlib.Path) -> JsonDict:
        """Read and parse a JSON file into a dict."""
        logger.debug(f"Load from {file_path}")
        with file_path.open("r") as rf:
            json_dict = json.load(rf)
        return json_dict

    def _split_dataset(
        self,
        examples: List[Any],
        train_ratio: float,
        validation_ratio: float,
        test_ratio: float,
    ) -> TrainValidationTestSplit:
        """Partition `examples` into contiguous train/validation/test slices.

        Sizes are rounded up with `math.ceil`; the slice bounds clamp at the
        list length, so every example lands in exactly one split.
        """
        # NOTE(review): exact float equality — ratio triples that do not sum
        # to a representable 1.0 (e.g. thirds) would fail this assert.
        assert train_ratio + validation_ratio + test_ratio == 1.0
        num_examples = len(examples)

        num_tng = math.ceil(num_examples * train_ratio)  # type: ignore
        num_val = math.ceil(num_examples * validation_ratio)  # type: ignore
        num_tst = math.ceil(num_examples * test_ratio)  # type: ignore

        tng_examples = examples[:num_tng]
        val_examples = examples[num_tng : num_tng + num_val]
        tst_examples = examples[num_tng + num_val : num_tng + num_val + num_tst]
        # Sanity check: slicing neither lost nor duplicated any example.
        assert len(tng_examples) + len(val_examples) + len(tst_examples) == num_examples

        return {
            "train": tng_examples,
            "validation": val_examples,
            "test": tst_examples,
        }

    def _load_and_split_dataset(
        self,
        base_dir: pathlib.Path,
        train_ratio: float,
        validation_ratio: float,
        test_ratio: float,
    ) -> TrainValidationTestSplit:
        """Load this task's examples from `base_dir` and split them by ratio."""
        examples = self.load_examples(base_dir)
        return self._split_dataset(
            examples=examples,
            train_ratio=train_ratio,
            validation_ratio=validation_ratio,
            test_ratio=test_ratio,
        )

    def split_generators(
        self,
        base_dir: pathlib.Path,
        train_ratio: float,
        validation_ratio: float,
        test_ratio: float,
    ) -> List[ds.SplitGenerator]:
        """Build the train/validation/test `datasets.SplitGenerator` list."""
        split_examples = self._load_and_split_dataset(
            base_dir=pathlib.Path(base_dir),
            train_ratio=train_ratio,
            validation_ratio=validation_ratio,
            test_ratio=test_ratio,
        )

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,  # type: ignore
                gen_kwargs={"examples": split_examples["train"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,  # type: ignore
                gen_kwargs={"examples": split_examples["validation"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,  # type: ignore
                gen_kwargs={"examples": split_examples["test"]},
            ),
        ]

    @abc.abstractmethod
    def load_examples(self, base_dir: pathlib.Path) -> List[Any]:
        # Subclasses locate and return the raw examples under `base_dir`.
        raise NotImplementedError
189
+
190
+
191
class RicoMetadataProcessor(RicoProcessor, metaclass=abc.ABCMeta):
    """Base class for metadata-style processors fed by a single CSV file
    (unlike task processors, which read an extracted directory tree)."""

    @abc.abstractmethod
    def load_examples(self, csv_file: pathlib.Path) -> List[Any]:
        # Parse the metadata CSV into a list of raw examples.
        raise NotImplementedError

    @abc.abstractmethod
    def split_generators(self, csv_file: pathlib.Path) -> List[ds.SplitGenerator]:
        # Build split generators directly from the CSV file.
        raise NotImplementedError
199
+
200
+
201
@dataclass
class ActivityClass(object):
    """A single node of an Android view hierarchy as captured by Rico.

    Field names mirror the raw JSON keys with dashes replaced by underscores;
    the reserved word ``class`` is stored as ``klass``.
    """

    abs_pos: bool
    adapter_view: bool
    ancestors: List[str]
    bounds: Tuple[int, int, int, int]
    clickable: bool
    content_desc: List[str]
    draw: bool
    enabled: bool
    focused: bool
    focusable: bool
    klass: str
    long_clickable: bool
    pressed: bool
    pointer: str
    scrollable_horizontal: bool
    scrollable_vertical: bool
    selected: bool
    visibility: str
    visible_to_user: bool

    # Attributes that are absent from some raw nodes.
    package: Optional[str] = None
    resource_id: Optional[str] = None
    rel_bounds: Optional[Tuple[int, int, int, int]] = None

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "ActivityClass":
        """Build an ``ActivityClass`` from one raw JSON hierarchy node."""
        normalized = {}
        for key, value in json_dict.items():
            # Raw keys are dashed (e.g. "abs-pos"); fields use underscores.
            normalized[key.replace("-", "_")] = value
        # "class" is a Python keyword, so it maps onto the "klass" field.
        normalized["klass"] = normalized.pop("class")
        return cls(**normalized)
232
+
233
+
234
@dataclass
class UiComponent(object):
    """A semantically annotated UI element from the Rico semantic annotations."""

    ancestors: List[str]
    bounds: Tuple[int, int, int, int]
    component_label: str
    clickable: bool
    klass: str

    # Present only on some components.
    icon_class: Optional[str] = None
    resource_id: Optional[str] = None

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "UiComponent":
        """Build a ``UiComponent`` from one raw JSON component dict."""
        normalized = {}
        for key, value in json_dict.items():
            # Raw keys may be dashed and/or camelCased (e.g. "componentLabel").
            normalized[to_snake_case(key.replace("-", "_"))] = value
        # "class" is a Python keyword, so it maps onto the "klass" field.
        normalized["klass"] = normalized.pop("class")
        return cls(**normalized)
252
+
253
+
254
@dataclass
class Activity(object):
    """One screen's view hierarchy: the root node plus depth-grouped children."""

    root: ActivityClass
    children: List[List[ActivityClass]]
    added_fragments: List[str]
    active_fragments: List[str]

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "Activity":
        """Build an ``Activity`` from a raw dict whose "children" were already
        flattened into per-depth lists (mutates the dict via ``pop``)."""
        root_node = ActivityClass.from_dict(json_dict.pop("root"))
        depth_groups = []
        for group in json_dict.pop("children"):
            depth_groups.append([ActivityClass.from_dict(node) for node in group])
        return cls(root=root_node, children=depth_groups, **json_dict)
272
+
273
+
274
@dataclass
class InteractionTracesData(object):
    """One step of an interaction trace: its activity plus request metadata."""

    activity_name: str
    activity: Activity
    is_keyboard_deployed: str
    request_id: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "InteractionTracesData":
        """Build from a raw view-hierarchy JSON dict (mutates it via ``pop``)."""
        # Parse the nested activity; the remaining keys map 1:1 onto fields.
        parsed_activity = Activity.from_dict(json_dict.pop("activity"))
        return cls(activity=parsed_activity, **json_dict)
286
+
287
+
288
@dataclass
class UiScreenshotsAndViewHierarchiesData(InteractionTracesData):
    """An interaction-trace record paired with its UI screenshot.

    Used by the "ui-screenshots-and-view-hierarchies" configuration, where the
    caller injects a "screenshot" key (the PIL image loaded from the JSON
    file's sibling .jpg) into the raw dict before calling ``from_dict``.
    """

    screenshot: PilImage

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "UiScreenshotsAndViewHierarchiesData":
        """Build from a raw JSON dict that also carries a "screenshot" entry.

        Delegates to the parent: the parsing is identical, ``cls`` binding
        makes the parent build this subclass, and the extra "screenshot" key
        is forwarded to the constructor as a field keyword argument.  (The
        previous body duplicated the parent implementation verbatim.)
        """
        return super().from_dict(json_dict)
297
+
298
+
299
@dataclass
class UiScreenshotsAndHierarchiesWithSemanticAnnotationsData(object):
    """One semantic-annotations record: root-node attributes, depth-grouped
    UI components, and the screenshot image."""

    ancestors: List[str]
    klass: str
    bounds: Tuple[int, int, int, int]
    clickable: bool
    children: List[List[UiComponent]]
    screenshot: PilImage

    @classmethod
    def from_dict(
        cls, json_dict: JsonDict
    ) -> "UiScreenshotsAndHierarchiesWithSemanticAnnotationsData":
        """Build from a raw dict whose children were already depth-grouped."""
        # "class" is a Python keyword, so it maps onto the "klass" field.
        json_dict["klass"] = json_dict.pop("class")
        component_groups = []
        for group in json_dict.pop("children"):
            component_groups.append(
                [UiComponent.from_dict(component) for component in group]
            )
        return cls(children=component_groups, **json_dict)
318
+
319
+
320
@dataclass
class Gesture(object):
    """A recorded user gesture: the target UI id and its (x, y) touch path."""

    ui_id: int
    xy: List[Tuple[float, float]]

    @classmethod
    def from_dict_to_gestures(cls, json_dict: JsonDict) -> List["Gesture"]:
        """Parse a gestures.json mapping (ui-id string -> coordinate list)."""
        gestures = []
        for raw_ui_id, coordinates in json_dict.items():
            # Keys are stringified integers in the raw JSON.
            gestures.append(Gesture(ui_id=int(raw_ui_id), xy=coordinates))
        return gestures
328
+
329
+
330
class InteractionTracesProcessor(RicoTaskProcessor):
    """Processor for the "interaction-traces" configuration.

    Each example is one user trace: its screenshots, the flattened view
    hierarchy of every step, and the recorded gestures.
    """

    def get_activity_class_features_dict(self):
        """Feature schema for a single (flattened) view-hierarchy node."""
        return {
            "abs_pos": ds.Value("bool"),
            "adapter_view": ds.Value("bool"),
            "ancestors": ds.Sequence(ds.Value("string")),
            "bounds": ds.Sequence(ds.Value("int64")),
            "clickable": ds.Value("bool"),
            "content_desc": ds.Sequence(ds.Value("string")),
            "draw": ds.Value("bool"),
            "enabled": ds.Value("bool"),
            "focusable": ds.Value("bool"),
            "focused": ds.Value("bool"),
            "klass": ds.Value("string"),
            "long_clickable": ds.Value("bool"),
            "package": ds.Value("string"),
            # NOTE(review): "pressed" is declared string while most flags are
            # bool — confirm this mirrors the raw data.
            "pressed": ds.Value("string"),
            "pointer": ds.Value("string"),
            "rel_bounds": ds.Sequence(ds.Value("int64")),
            "resource_id": ds.Value("string"),
            "scrollable_horizontal": ds.Value("bool"),
            "scrollable_vertical": ds.Value("bool"),
            "selected": ds.Value("bool"),
            "visibility": ds.Value("string"),
            "visible_to_user": ds.Value("bool"),
        }

    def get_activity_features_dict(self, activity_class):
        """Feature schema for one view-hierarchy record (activity + metadata)."""
        return {
            "activity_name": ds.Value("string"),
            "activity": {
                "root": activity_class,
                # Children grouped by depth: a sequence of node lists.
                "children": ds.Sequence(ds.Sequence(activity_class)),
                "added_fragments": ds.Sequence(ds.Value("string")),
                "active_fragments": ds.Sequence(ds.Value("string")),
            },
            "is_keyboard_deployed": ds.Value("bool"),
            "request_id": ds.Value("string"),
        }

    def get_features(self) -> ds.Features:
        """Full schema: screenshots, view hierarchies, and gestures per trace."""
        activity_class = self.get_activity_class_features_dict()
        activity = self.get_activity_features_dict(activity_class)
        return ds.Features(
            {
                "screenshots": ds.Sequence(ds.Image()),
                "view_hierarchies": ds.Sequence(activity),
                "gestures": ds.Sequence(
                    {
                        "ui_id": ds.Value("int32"),
                        # One (x, y) coordinate path per gesture.
                        "xy": ds.Sequence(ds.Sequence(ds.Value("float32"))),
                    }
                ),
            }
        )

    def load_examples(self, base_dir: pathlib.Path) -> List[pathlib.Path]:
        """Return one directory per app found under `filtered_traces/`."""
        task_dir = base_dir / "filtered_traces"
        return [d for d in task_dir.iterdir() if d.is_dir()]

    def generate_examples(self, examples: List[pathlib.Path]):
        """Yield (index, example) pairs — one example per trace directory."""
        idx = 0
        for trace_base_dir in examples:
            for trace_dir in trace_base_dir.iterdir():
                # Step screenshots; "._*" (AppleDouble-style) files are skipped.
                screenshots_dir = trace_dir / "screenshots"
                screenshots = [
                    self._load_image(f)
                    for f in screenshots_dir.iterdir()
                    if not f.name.startswith("._")
                ]

                view_hierarchies_dir = trace_dir / "view_hierarchies"
                view_hierarchies_json_files = [
                    f
                    for f in view_hierarchies_dir.iterdir()
                    if f.suffix == ".json" and not f.name.startswith("._")
                ]
                view_hierarchies_jsons = []
                for json_file in view_hierarchies_json_files:
                    json_dict = self._load_json(json_file)
                    # json.load yields None only for a literal JSON "null";
                    # such files are skipped with a warning.
                    if json_dict is None:
                        logger.warning(f"Invalid json file: {json_file}")
                        continue

                    # Flatten the nested root children into per-depth groups so
                    # the record matches the non-recursive features schema.
                    children = self._flatten_children(
                        children=json_dict["activity"]["root"].pop("children")
                    )

                    json_dict["activity"]["children"] = [v for v in children.values()]
                    data = InteractionTracesData.from_dict(json_dict)
                    view_hierarchies_jsons.append(asdict(data))

                # gestures.json maps a UI id to its recorded touch coordinates.
                gestures_json = trace_dir / "gestures.json"
                with gestures_json.open("r") as rf:
                    gestures_dict = json.load(rf)
                gestures = Gesture.from_dict_to_gestures(gestures_dict)

                example = {
                    "screenshots": screenshots,
                    "view_hierarchies": view_hierarchies_jsons,
                    "gestures": [asdict(gesture) for gesture in gestures],
                }
                yield idx, example
                idx += 1
434
+
435
+
436
class UiScreenshotsAndViewHierarchiesProcessor(InteractionTracesProcessor):
    """Processor for the "ui-screenshots-and-view-hierarchies" configuration.

    Reuses the interaction-trace activity schema and adds the screenshot that
    sits next to each JSON file in the `combined/` directory.
    """

    def get_features(self) -> ds.Features:
        """Schema: one activity record plus its screenshot image."""
        activity_class = self.get_activity_class_features_dict()
        activity = {
            "screenshot": ds.Image(),
            **self.get_activity_features_dict(activity_class),
        }
        return ds.Features(activity)

    def load_examples(self, base_dir: pathlib.Path) -> List[Any]:
        """Return every JSON file under `combined/`."""
        task_dir = base_dir / "combined"
        json_files = [f for f in task_dir.iterdir() if f.suffix == ".json"]
        return json_files

    def generate_examples(self, examples: List[pathlib.Path]):
        """Yield (index, example); each JSON pairs with a same-stem ``.jpg``."""
        for i, json_file in enumerate(examples):
            with json_file.open("r") as rf:
                json_dict = json.load(rf)
            # Flatten nested children into per-depth groups (base-class helper).
            children = self._flatten_children(
                children=json_dict["activity"]["root"].pop("children")
            )
            json_dict["activity"]["children"] = [v for v in children.values()]
            json_dict["screenshot"] = self._load_image(
                json_file.parent / f"{json_file.stem}.jpg"
            )
            data = UiScreenshotsAndViewHierarchiesData.from_dict(json_dict)
            example = asdict(data)
            yield i, example
464
+
465
+
466
class UiLayoutVectorsProcessor(RicoTaskProcessor):
    """Processor for the "ui-layout-vectors" configuration: 64-dim layout
    embeddings (`ui_vectors.npy`) aligned row-by-row with `ui_names.json`."""

    def get_features(self) -> ds.Features:
        """Schema: a float vector and the UI name it belongs to."""
        return ds.Features(
            {"vector": ds.Sequence(ds.Value("float32")), "name": ds.Value("string")}
        )

    def _load_ui_vectors(self, file_path: pathlib.Path) -> np.ndarray:
        """Load the (N, 64) layout-vector matrix."""
        logger.debug(f"Load from {file_path}")
        ui_vectors = np.load(file_path)
        # Every layout vector is expected to be 64-dimensional.
        assert ui_vectors.shape[1] == 64
        return ui_vectors

    def _load_ui_names(self, file_path: pathlib.Path) -> List[str]:
        """Load the UI names aligned with the vector rows."""
        with file_path.open("r") as rf:
            json_dict = json.load(rf)
        return json_dict["ui_names"]

    def load_examples(self, base_dir: pathlib.Path) -> List[UiLayoutVectorSample]:
        """Zip vectors and names from `ui_layout_vectors/` into samples."""
        task_dir = base_dir / "ui_layout_vectors"
        ui_vectors = self._load_ui_vectors(file_path=task_dir / "ui_vectors.npy")
        ui_names = self._load_ui_names(file_path=task_dir / "ui_names.json")
        assert len(ui_vectors) == len(ui_names)

        return [
            {"vector": vector, "name": name}
            for vector, name in zip(ui_vectors, ui_names)
        ]

    def generate_examples(self, examples: List[UiLayoutVectorSample]):
        """Yield (index, sample), converting each numpy row to a plain list."""
        for i, sample in enumerate(examples):
            sample["vector"] = sample["vector"].tolist()
            yield i, sample
498
+
499
+
500
class AnimationsProcessor(RicoTaskProcessor):
    """Placeholder for the "animations" configuration — not implemented yet
    (listed as [WIP] in the README)."""

    def get_features(self) -> ds.Features:
        # Not implemented yet.
        raise NotImplementedError

    def load_examples(self, base_dir: pathlib.Path) -> List[Any]:
        # Not implemented yet.
        raise NotImplementedError

    def generate_examples(self, examples: List[Any]):
        # Not implemented yet.
        raise NotImplementedError
509
+
510
+
511
class UiScreenshotsAndHierarchiesWithSemanticAnnotationsProcessor(RicoTaskProcessor):
    """Processor for "ui-screenshots-and-hierarchies-with-semantic-annotations".

    Reads `semantic_annotations/<id>.json` hierarchies plus their same-stem
    ``.png`` screenshots.
    """

    def get_features(self) -> ds.Features:
        """Schema: root attributes, depth-grouped UI components, screenshot."""
        ui_component = {
            "ancestors": ds.Sequence(ds.Value("string")),
            "bounds": ds.Sequence(ds.Value("int64")),
            # Closed set of 25 semantic component categories.
            "component_label": ds.ClassLabel(
                num_classes=25,
                names=[
                    "Text",
                    "Image",
                    "Icon",
                    "Text Button",
                    "List Item",
                    "Input",
                    "Background Image",
                    "Card",
                    "Web View",
                    "Radio Button",
                    "Drawer",
                    "Checkbox",
                    "Advertisement",
                    "Modal",
                    "Pager Indicator",
                    "Slider",
                    "On/Off Switch",
                    "Button Bar",
                    "Toolbar",
                    "Number Stepper",
                    "Multi-Tab",
                    "Date Picker",
                    "Map View",
                    "Video",
                    "Bottom Navigation",
                ],
            ),
            "clickable": ds.Value("bool"),
            "klass": ds.Value("string"),
            "icon_class": ds.Value("string"),
            "resource_id": ds.Value("string"),
        }
        return ds.Features(
            {
                "ancestors": ds.Sequence(ds.Value("string")),
                "klass": ds.Value("string"),
                "bounds": ds.Sequence(ds.Value("int64")),
                "clickable": ds.Value("bool"),
                "children": ds.Sequence(ds.Sequence(ui_component)),
                "screenshot": ds.Image(),
            }
        )

    def load_examples(self, base_dir: pathlib.Path) -> List[Any]:
        """Return every JSON file under `semantic_annotations/`."""
        task_dir = base_dir / "semantic_annotations"
        json_files = [f for f in task_dir.iterdir() if f.suffix == ".json"]
        return json_files

    def generate_examples(self, examples: List[pathlib.Path]):
        """Yield (index, example); each JSON pairs with a same-stem ``.png``."""
        for i, json_file in enumerate(examples):
            with json_file.open("r") as rf:
                json_dict = json.load(rf)

            # Flatten nested children into per-depth groups (base-class helper).
            children = self._flatten_children(children=json_dict.pop("children"))
            json_dict["children"] = [v for v in children.values()]
            json_dict["screenshot"] = self._load_image(
                json_file.parent / f"{json_file.stem}.png"
            )
            data = UiScreenshotsAndHierarchiesWithSemanticAnnotationsData.from_dict(
                json_dict
            )
            yield i, asdict(data)
581
+
582
+
583
class UiMetadataProcessor(RicoMetadataProcessor):
    """Processor for ``ui_details.csv``: per-screen metadata such as the app
    package name and the interaction trace the screen belongs to."""

    def get_features(self) -> ds.Features:
        feature_dict = {
            "ui_number": ds.Value("int32"),
            "app_package_name": ds.Value("string"),
            "interaction_trace_number": ds.Value("string"),
            "ui_number_in_trace": ds.Value("string"),
        }
        return ds.Features(feature_dict)

    def load_examples(self, csv_file: pathlib.Path) -> List[Any]:
        """Read the CSV (66,261 rows) and normalize headers to snake_case."""
        df = pd.read_csv(csv_file)
        df.columns = ["_".join(column.split()) for column in df.columns.str.lower()]
        return df.to_dict(orient="records")

    def split_generators(
        self, csv_file: pathlib.Path, **kwargs
    ) -> List[ds.SplitGenerator]:
        """Expose the whole CSV as a single "metadata" split (no train/test)."""
        examples = self.load_examples(csv_file)
        return [ds.SplitGenerator(name="metadata", gen_kwargs={"examples": examples})]

    def generate_examples(self, examples: List[Any]):
        """Yield (index, record) pairs straight from the parsed CSV rows."""
        yield from enumerate(examples)
608
+
609
+
610
class PlayStoreMetadataProcessor(RicoMetadataProcessor):
    """Processor for ``app_details.csv``: Play Store metadata per app
    (category, rating, download bucket, etc.)."""

    def get_features(self) -> ds.Features:
        return ds.Features(
            {
                "app_package_name": ds.Value("string"),
                "play_store_name": ds.Value("string"),
                # The 27 Play Store categories present in the CSV.
                "category": ds.ClassLabel(
                    num_classes=27,
                    names=[
                        "Books & Reference",
                        "Comics",
                        "Health & Fitness",
                        "Social",
                        "Entertainment",
                        "Weather",
                        "Communication",
                        "Sports",
                        "News & Magazines",
                        "Finance",
                        "Shopping",
                        "Education",
                        "Travel & Local",
                        "Business",
                        "Medical",
                        "Beauty",
                        "Food & Drink",
                        "Dating",
                        "Auto & Vehicles",
                        "Music & Audio",
                        "House & Home",
                        "Maps & Navigation",
                        "Lifestyle",
                        "Art & Design",
                        "Parenting",
                        "Events",
                        "Video Players & Editors",
                    ],
                ),
                "average_rating": ds.Value("float32"),
                "number_of_ratings": ds.Value("int32"),
                # Downloads are reported as range buckets, hence a ClassLabel
                # rather than a numeric feature.
                "number_of_downloads": ds.ClassLabel(
                    num_classes=15,
                    names=[
                        "100,000 - 500,000",
                        "10,000 - 50,000",
                        "50,000,000 - 100,000,000",
                        "50,000 - 100,000",
                        "1,000,000 - 5,000,000",
                        "5,000,000 - 10,000,000",
                        "500,000 - 1,000,000",
                        "1,000 - 5,000",
                        "10,000,000 - 50,000,000",
                        "5,000 - 10,000",
                        "100,000,000 - 500,000,000",
                        "500,000,000 - 1,000,000,000",
                        "500 - 1,000",
                        "1,000,000,000 - 5,000,000,000",
                        "100 - 500",
                    ],
                ),
                "date_updated": ds.Value("string"),
                "icon_url": ds.Value("string"),
            }
        )

    def cleanup_metadata(self, df: pd.DataFrame) -> pd.DataFrame:
        """Normalize the raw CSV columns and drop the one malformed row.

        Args:
            df: Raw app-details frame with snake_case column names.

        Returns:
            The cleaned frame, exactly one row shorter than the input.
        """
        # Strip whitespace around the download buckets and coerce the
        # quoted ratings count (e.g. '"1,234"'-style cells) to int.
        df = df.assign(
            number_of_downloads=df["number_of_downloads"].str.strip(),
            number_of_ratings=df["number_of_ratings"]
            .str.replace('"', "")
            .str.strip()
            .astype(int),
        )

        def remove_noisy_data(df: pd.DataFrame) -> pd.DataFrame:
            # Keeps every row EXCEPT the one where both fields carry these
            # shifted values (De Morgan: drop iff category == "000 - 1" AND
            # number_of_downloads == "January 10, 2015") — presumably a
            # single record with misaligned CSV cells.
            old_num = len(df)
            df = df[
                (df["category"] != "000 - 1")
                | (df["number_of_downloads"] != "January 10, 2015")
            ]
            new_num = len(df)
            # Invariant: exactly one noisy row exists in the shipped CSV.
            assert new_num == old_num - 1
            return df

        df = remove_noisy_data(df)

        return df

    def load_examples(self, csv_file: pathlib.Path) -> List[Any]:
        """Read and clean the CSV, returning one dict per app."""
        df = pd.read_csv(csv_file)
        # Lowercase headers and join words with underscores -> snake_case.
        df.columns = ["_".join(col.split()) for col in df.columns.str.lower()]
        df = self.cleanup_metadata(df)
        return df.to_dict(orient="records")

    def split_generators(
        self, csv_file: pathlib.Path, **kwargs
    ) -> List[ds.SplitGenerator]:
        # The whole CSV is exposed as a single "metadata" split.
        metadata = self.load_examples(csv_file)
        return [ds.SplitGenerator(name="metadata", gen_kwargs={"examples": metadata})]

    def generate_examples(self, examples: List[Any]):
        for i, metadata in enumerate(examples):
            yield i, metadata
713
+
714
+
715
@dataclass
class RicoConfig(ds.BuilderConfig):
    """BuilderConfig shared by every Rico subset.

    ``data_url`` and ``processor`` are required (enforced in
    ``__post_init__``); they default to ``None`` only so the dataclass
    field order remains valid.
    """

    train_ratio: float = 0.85  # fraction of examples in the train split
    validation_ratio: float = 0.05  # fraction in the validation split
    test_ratio: float = 0.10  # fraction in the test split
    random_state: int = 0  # presumably the RNG seed used when splitting — confirm in processors
    data_url: Optional[str] = None  # download URL for this subset (required)
    processor: Optional[RicoProcessor] = None  # task-specific processor (required)

    def __post_init__(self):
        assert self.data_url is not None
        assert self.processor is not None
        # Exact float equality (`== 1.0`) would reject valid combinations
        # such as 0.8 + 0.1 + 0.1, whose binary floating-point sum is not
        # exactly 1.0 — compare with a small tolerance instead.
        total = self.train_ratio + self.validation_ratio + self.test_ratio
        assert abs(total - 1.0) < 1e-9, f"split ratios must sum to 1.0, got {total}"
728
+
729
+
730
class RicoDataset(ds.GeneratorBasedBuilder):
    """Loader for the Rico mobile-UI dataset.

    Each BuilderConfig pairs a download URL with a task-specific processor;
    the builder itself only delegates to ``config.processor`` for features,
    split generation, and example generation.
    """

    VERSION = ds.Version("1.0.0")
    BUILDER_CONFIGS = [
        RicoConfig(
            name="ui-screenshots-and-view-hierarchies",
            version=VERSION,
            description="Contains 66k+ unique UI screens",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/unique_uis.tar.gz",
            processor=UiScreenshotsAndViewHierarchiesProcessor(),
        ),
        RicoConfig(
            name="ui-layout-vectors",
            version=VERSION,
            description="Contains 64-dimensional vector representations for each UI screen that encode layout based on the distribution of text and images.",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/ui_layout_vectors.zip",
            processor=UiLayoutVectorsProcessor(),
        ),
        RicoConfig(
            name="interaction-traces",
            version=VERSION,
            description="Contains user interaction traces organized by app.",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/traces.tar.gz",
            processor=InteractionTracesProcessor(),
        ),
        # NOTE: AnimationsProcessor is a stub; selecting this config raises
        # NotImplementedError.
        RicoConfig(
            name="animations",
            version=VERSION,
            description="Contains GIFs that demonstrate how screens animated in response to a user interaction; follows the same folder structure introduced for interaction traces.",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/animations.tar.gz",
            processor=AnimationsProcessor(),
        ),
        RicoConfig(
            name="ui-screenshots-and-hierarchies-with-semantic-annotations",
            version=VERSION,
            description="Contains 66k+ UI screens and hierarchies augmented with semantic annotations that describe what elements on the screen mean and how they are used.",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/semantic_annotations.zip",
            processor=UiScreenshotsAndHierarchiesWithSemanticAnnotationsProcessor(),
        ),
        RicoConfig(
            name="ui-metadata",
            version=VERSION,
            description="Contains metadata about each UI screen: the name of the app it came from, the user interaction trace within that app.",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/ui_details.csv",
            processor=UiMetadataProcessor(),
        ),
        RicoConfig(
            name="play-store-metadata",
            version=VERSION,
            description="Contains metadata about the apps in the dataset including an app’s category, average rating, number of ratings, and number of downloads.",
            data_url="https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/app_details.csv",
            processor=PlayStoreMetadataProcessor(),
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
        # NOTE(review): unlike _split_generators below, this accesses
        # config.processor without the `is not None` assert — relies on
        # RicoConfig.__post_init__ having validated it.
        processor: RicoProcessor = self.config.processor
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=processor.get_features(),
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Download/extract this config's archive and delegate split creation
        (including the train/validation/test ratios) to the processor."""
        config: RicoConfig = self.config
        assert config.processor is not None
        processor: RicoProcessor = config.processor

        return processor.split_generators(
            dl_manager.download_and_extract(self.config.data_url),
            train_ratio=config.train_ratio,
            validation_ratio=config.validation_ratio,
            test_ratio=config.test_ratio,
        )

    def _generate_examples(self, **kwargs):
        # kwargs are whatever gen_kwargs the processor put into its
        # SplitGenerators (e.g. {"examples": [...]}).
        config: RicoConfig = self.config
        assert config.processor is not None
        processor: RicoProcessor = config.processor
        yield from processor.generate_examples(**kwargs)
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
[tool.poetry]
name = "huggingface-datasets-rico"
version = "0.1.0"
description = ""
# NOTE(review): the author email was redacted by the diff renderer
# ("[email protected]"); restore the real address before publishing.
authors = ["Shunsuke KITADA <[email protected]>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.9"
# Runtime dependency of the loading script; "vision" extra pulls in Pillow
# for the ds.Image() feature.
datasets = {extras = ["vision"], version = "^2.14.6"}


# Tooling invoked by the CI workflow (black / ruff / mypy / pytest).
[tool.poetry.group.dev.dependencies]
ruff = "^0.1.1"
black = "^23.10.1"
mypy = "^1.6.1"
pytest = "^7.4.2"
types-pillow = "^10.1.0.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
tests/Rico_test.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import datasets as ds
4
+ import pytest
5
+
6
+
7
@pytest.fixture
def dataset_path() -> str:
    # Path to the dataset loading script, relative to the repository root
    # (tests are expected to run from the repo root).
    return "Rico.py"
10
+
11
+
12
# Skipped on CI: the script downloads multi-GB archives.
# NOTE(review): bool(os.environ.get("CI", False)) is True for ANY non-empty
# string, including "false" — acceptable here since the intent is "skip
# whenever a CI env var is present", but worth confirming.
@pytest.mark.skipif(
    condition=bool(os.environ.get("CI", False)),
    reason=(
        "Because this loading script downloads a large dataset, "
        "we will skip running it on CI."
    ),
)
@pytest.mark.parametrize(
    argnames=(
        "dataset_task",
        "expected_num_train",
        "expected_num_valid",
        "expected_num_test",
    ),
    argvalues=(
        ("ui-screenshots-and-view-hierarchies", 56322, 3314, 6625),
        ("ui-layout-vectors", 61288, 3606, 7209),
        ("interaction-traces", 8749, 513, 1030),
        # "animations" is excluded: its processor is not implemented yet.
        # "animations",
        ("ui-screenshots-and-hierarchies-with-semantic-annotations", 56322, 3314, 6625),
    ),
)
def test_load_dataset(
    dataset_path: str,
    dataset_task: str,
    expected_num_train: int,
    expected_num_valid: int,
    expected_num_test: int,
):
    """End-to-end check: load each task config and verify the split sizes."""
    dataset = ds.load_dataset(path=dataset_path, name=dataset_task)
    assert dataset["train"].num_rows == expected_num_train
    assert dataset["validation"].num_rows == expected_num_valid
    assert dataset["test"].num_rows == expected_num_test
45
+
46
+
47
# Skipped on CI for the same reason as test_load_dataset: real downloads.
@pytest.mark.skipif(
    condition=bool(os.environ.get("CI", False)),
    reason=(
        "Because this loading script downloads a large dataset, "
        "we will skip running it on CI."
    ),
)
@pytest.mark.parametrize(
    argnames=("dataset_task", "expected_num_data"),
    argvalues=(
        ("ui-metadata", 66261),
        # PlayStoreMetadataProcessor.cleanup_metadata drops exactly one
        # malformed row, hence the "- 1".
        ("play-store-metadata", 9384 - 1),  # There is one invalid data
    ),
)
def test_load_metadata(dataset_path: str, dataset_task: str, expected_num_data: int):
    """Metadata configs expose a single "metadata" split; check its row count."""
    metadata = ds.load_dataset(path=dataset_path, name=dataset_task)
    assert metadata["metadata"].num_rows == expected_num_data
tests/__init__.py ADDED
File without changes