XINZHANG94 committed
Commit 8a27397 · 1 Parent(s): dae6b9a
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.ttf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,138 @@
1
+ fonts/
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # Distribution / packaging
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ pip-wheel-metadata/
26
+ share/python-wheels/
27
+ *.egg-info/
28
+ .installed.cfg
29
+ *.egg
30
+
31
+ # PyInstaller
32
+ # Usually these files are written by a python script from a template
33
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+ cover/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+ db.sqlite3
64
+ db.sqlite3-journal
65
+
66
+ # Flask stuff:
67
+ instance/
68
+ .webassets-cache
69
+
70
+ # Scrapy stuff:
71
+ .scrapy
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ .python-version
88
+
89
+ # pipenv
90
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
92
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
93
+ # install all needed dependencies.
94
+ #Pipfile.lock
95
+
96
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97
+ __pypackages__/
98
+
99
+ # Celery stuff
100
+ celerybeat-schedule
101
+ celerybeat.pid
102
+
103
+ # SageMath parsed files
104
+ *.sage.py
105
+
106
+ # Environments
107
+ .env
108
+ .envrc
109
+ .venv
110
+ venv/
111
+ ENV/
112
+ env/
113
+ env.bak/
114
+ venv.bak/
115
+
116
+ # Spyder project settings
117
+ .spyderproject
118
+ .spyproject
119
+
120
+ # Rope project settings
121
+ .ropeproject
122
+
123
+ # mkdocs documentation
124
+ /site
125
+
126
+ # mypy
127
+ .mypy_cache/
128
+ .dmypy.json
129
+ dmypy.json
130
+
131
+ # Pyre type checker
132
+ .pyre/
133
+
134
+ # pytype static type analyzer
135
+ .pytype/
136
+
137
+ # Cython debug symbols
138
+ cython_debug/
README.md CHANGED
@@ -1,7 +1,7 @@
  ---
- title: Tiles
+ title: Xin Cpu Test
  emoji: 😻
- colorFrom: gray
+ colorFrom: indigo
  colorTo: blue
  sdk: gradio
  sdk_version: 4.22.0
app.py ADDED
@@ -0,0 +1,619 @@
1
+ import gradio as gr
2
+ import os
3
+ from PIL import Image, ImageFont
4
+
5
+ from typing import List
6
+ from setting_names import names_mapping
7
+ from utils import (
8
+ PixelColorPalette,
9
+ Painting,
10
+ ColorSelection,
11
+ ImageBoarder,
12
+ ImageText,
13
+ image_paste_methods,
14
+ HexagonGrid,
15
+ ColorPalette,
16
+ ImageManipulation
17
+ )
18
+
19
+ langangue = 'cn'
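+ # the next two lines build the absolute path of the fonts/ directory that sits next to this script; it is passed to ImageText as abs_path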
20
+ abs_path_font = os.path.abspath(__file__).split(os.sep)[:-1]
21
+ font_abs_path = os.sep.join(abs_path_font + ["fonts"])
22
+
23
+ names = dict((label_name, lan[langangue]) for label_name, lan in names_mapping.items())
24
+ def get_image_examples():
25
+ examples=[
26
+ os.path.join(os.path.dirname(__file__), "images/dam_01.png"),
27
+ os.path.join(os.path.dirname(__file__), "images/dam_02.png"),
28
+ os.path.join(os.path.dirname(__file__), "images/dam_03.png"),
29
+ os.path.join(os.path.dirname(__file__), "images/dam_04.png"),
30
+ os.path.join(os.path.dirname(__file__), "images/dam_05.png"),
31
+ os.path.join(os.path.dirname(__file__), "images/dam_06.png"),
32
+ ]
33
+ return examples
34
+
35
+
36
+ # step1 reduce image colors
37
+ def pixel_palette_image(raw_image: Image.Image,
38
+ transformation_method: str,
39
+ rgb_palette: str,
40
+ max_colors: int,
41
+ distance_strategy: str,) -> List[Image.Image]:
42
+ """This function will reduce the color into wanted number of colors."""
43
+ if not rgb_palette:
44
+ rgb_palette = None
45
+ print("mapping to colors...")
46
+ pcp = PixelColorPalette(pil_image=raw_image,
47
+ transformation_method=transformation_method,
48
+ rgb_palette=rgb_palette,
49
+ max_colors=max_colors,
50
+ distance_strategy=distance_strategy
51
+ )
52
+ image = pcp.translate()
53
+ cp = ColorPalette(
54
+ pil_image=image,
55
+ color_block_size=1,
56
+ )
57
+ rgb_string = cp.rgbs_string
58
+ print("Mapped to limited colors...")
59
+ return [image, rgb_string]
60
+
61
+ # step2 tile the image
62
+ def big_tile():
63
+ return (50, 120, 25, 2, 1)
64
+ def small_tile():
65
+ return (25, 60, 12, 1, 1)
66
+ def map_tiles(
67
+ image: Image.Image,
68
+ painting_width: int,
69
+ painting_height: int,
70
+ tile_preset: str,
71
+ tile_width: int,
72
+ tile_height: int,
73
+ tile_height_tip: int,
74
+ gap_horizontal: int,
75
+ gap_vertical: int,
76
+ gap_color: str,
77
+ color_aggregate_type: str) -> None:
78
+ print("Creating Tiles...")
79
+ if tile_preset.startswith("large"):
80
+ tile_width, tile_height, tile_height_tip, gap_horizontal, gap_vertical = big_tile()
81
+ elif tile_preset.startswith("small"):
82
+ tile_width, tile_height, tile_height_tip, gap_horizontal, gap_vertical = small_tile()
83
+ tiles = Painting(
84
+ painting_width,
85
+ painting_height,
86
+ tile_width,
87
+ tile_height,
88
+ tile_height_tip,
89
+ gap_horizontal,
90
+ gap_vertical,
91
+ gap_color
92
+ )
93
+ cs = ColorSelection(image, color_aggregate_type, tiles)
94
+ pil_image = cs.translate_image_tensor()
95
+ print("Created tileed image...")
96
+ return pil_image
97
+
98
+
99
+ # step3 apply boarder
100
+ def apply_boarder(image: Image.Image,
101
+ left_boarder_dist: int,
102
+ right_boarder_dist: int,
103
+ top_boarder_dist: int,
104
+ bottom_boarder_dist: int,
105
+ line_width: int,
106
+ line_color: str,
107
+ margin_left: int,
108
+ margin_right: int,
109
+ margin_top: int,
110
+ margin_bottom: int) -> Image.Image:
111
+ print("Drawing boarder...")
112
+ ib = ImageBoarder(
113
+ image=image,
114
+ line_distances={"left": left_boarder_dist, "right": right_boarder_dist, "top": top_boarder_dist, "bottom": bottom_boarder_dist},
115
+ line_width=line_width,
116
+ line_color=line_color,
117
+ margin_widths={"left": margin_left, "right": margin_right, "top": margin_top, "bottom": margin_bottom}
118
+ )
119
+ image = ib.add_custom_lines()
120
+ return image
121
+
122
+
123
+ # step4 create header text
124
+ def create_header_text(
125
+ image: Image.Image,
126
+ width: int,
127
+ height: int,
128
+ bg_color: str,
129
+ font_name: str,
130
+ font_size: int,
131
+ text_color: str,
132
+ x_pos: int,
133
+ y_pos: int,
134
+ text: str,
135
+ onto_position_x: int,
136
+ onto_position_y: int
137
+ ) -> Image.Image:
138
+ print("Drawing header text...")
139
+ ti = ImageText(
140
+ width=width,
141
+ height=height,
142
+ background_color=bg_color,
143
+ text=text,
144
+ font_name=font_name,
145
+ font_size=font_size,
146
+ text_color=text_color,
147
+ text_position=(x_pos, y_pos),
148
+ abs_path=font_abs_path
149
+ )
150
+ text_image = ti.create_text_image()
151
+ image_paste = image_paste_methods(text_image, image, "no-resize", onto_position_x, onto_position_y, None, None)
152
+ result_image, result_mask = image_paste.paste_onto()
153
+ return result_image
154
+
155
+
156
+ # step5 color grid
157
+ def create_color_grid(
158
+ image: Image.Image,
159
+ color_list: str,
160
+ hex_width: int,
161
+ hex_height: int,
162
+ tip_height: int,
163
+ bg_color: str,
164
+ onto_position_x: int,
165
+ onto_position_y: int
166
+ ) -> Image.Image:
167
+ print("Drawing color grid...")
168
+ hg = HexagonGrid(
169
+ colors=color_list,
170
+ width=hex_width,
171
+ hex_height=hex_height,
172
+ tip_height=tip_height,
173
+ bg_color=bg_color
174
+ )
175
+ grid = hg.create_color_hexagon_grid()
176
+ image_paste = image_paste_methods(grid, image, "no-resize", onto_position_x, onto_position_y, None, None)
177
+ result_image, result_mask = image_paste.paste_onto()
178
+ return result_image
179
+
180
+
181
+ # step6 top edge label
182
+ def top_edge_label(
183
+ image: Image.Image,
184
+ width: int,
185
+ height: int,
186
+ bg_color: str,
187
+ font_name: str,
188
+ font_size: int,
189
+ text_color: str,
190
+ x_pos: int,
191
+ y_pos: int,
192
+ text: str,
193
+ onto_position_x: int,
194
+ onto_position_y: int
195
+ ) -> Image.Image:
196
+ print("Drawing top label...")
197
+ ti = ImageText(
198
+ width=width,
199
+ height=height,
200
+ background_color=bg_color,
201
+ text=text,
202
+ font_name=font_name,
203
+ font_size=font_size,
204
+ text_color=text_color,
205
+ text_position=(x_pos, y_pos),
206
+ abs_path=font_abs_path
207
+
208
+ )
209
+ text_image = ti.create_text_image()
210
+ image_paste = image_paste_methods(text_image, image, "no-resize", onto_position_x, onto_position_y, None, None)
211
+ result_image, result_mask = image_paste.paste_onto()
212
+ return result_image
213
+
214
+
215
+ # step7 left edge label
216
+ def left_edge_label(
217
+ image: Image.Image,
218
+ width: int,
219
+ height: int,
220
+ bg_color: str,
221
+ font_name: str,
222
+ font_size: int,
223
+ text_color: str,
224
+ x_pos: int,
225
+ y_pos: int,
226
+ text: str,
227
+ onto_position_x: int,
228
+ onto_position_y: int
229
+ ) -> Image.Image:
230
+ print("Drawing left label...")
231
+ ti = ImageText(
232
+ width=width,
233
+ height=height,
234
+ background_color=bg_color,
235
+ text=text,
236
+ font_name=font_name,
237
+ font_size=font_size,
238
+ text_color=text_color,
239
+ text_position=(x_pos, y_pos),
240
+ abs_path=font_abs_path
241
+ )
242
+ text_image = ti.create_text_image()
243
+ im = ImageManipulation(text_image)
244
+ text_image = im.rotation(90)
245
+ image_paste = image_paste_methods(text_image, image, "no-resize", onto_position_x, onto_position_y, None, None)
246
+ result_image, result_mask = image_paste.paste_onto()
247
+
248
+ return result_image
249
+
250
+
251
+ # step8 right edge label
252
+ def right_edge_label(
253
+ image: Image.Image,
254
+ width: int,
255
+ height: int,
256
+ bg_color: str,
257
+ font_name: str,
258
+ font_size: int,
259
+ text_color: str,
260
+ x_pos: int,
261
+ y_pos: int,
262
+ text: str,
263
+ onto_position_x: int,
264
+ onto_position_y: int
265
+ ) -> Image.Image:
266
+ print("Drawing right label...")
267
+ ti = ImageText(
268
+ width=width,
269
+ height=height,
270
+ background_color=bg_color,
271
+ text=text,
272
+ font_name=font_name,
273
+ font_size=font_size,
274
+ text_color=text_color,
275
+ text_position=(x_pos, y_pos),
276
+ abs_path=font_abs_path
277
+ )
278
+ text_image = ti.create_text_image()
279
+ im = ImageManipulation(text_image)
280
+ text_image = im.rotation(90)
281
+ image_paste = image_paste_methods(text_image, image, "no-resize", onto_position_x, onto_position_y, None, None)
282
+ result_image, result_mask = image_paste.paste_onto()
283
+ return result_image
284
+
285
+
286
+ # step9 bottom edge label
287
+ def bot_edge_label(
288
+ image: Image.Image,
289
+ width: int,
290
+ height: int,
291
+ bg_color: str,
292
+ font_name: str,
293
+ font_size: int,
294
+ text_color: str,
295
+ x_pos: int,
296
+ y_pos: int,
297
+ text: str,
298
+ onto_position_x: int,
299
+ onto_position_y: int
300
+ ) -> Image.Image:
301
+ print("Drawing bottom label...")
302
+ ti = ImageText(
303
+ width=width,
304
+ height=height,
305
+ background_color=bg_color,
306
+ text=text,
307
+ font_name=font_name,
308
+ font_size=font_size,
309
+ text_color=text_color,
310
+ text_position=(x_pos, y_pos),
311
+ abs_path=font_abs_path
312
+ )
313
+ text_image = ti.create_text_image()
314
+ image_paste = image_paste_methods(text_image, image, "no-resize", onto_position_x, onto_position_y, None, None)
315
+ result_image, result_mask = image_paste.paste_onto()
316
+ return result_image
317
+
318
+ with gr.Blocks() as demo:
319
+ gr.Markdown(names["page_header"])
320
+ with gr.Row(equal_height=False):
321
+ with gr.Column():
322
+ with gr.Accordion(open=True, label=names['raw_image']):
323
+ raw_image = gr.Image(type="pil")
324
+ btn = gr.Button(names['run'], variant="primary")
325
+
326
+ # parameters for image color reduce
327
+ with gr.Accordion(open=True, label=names["image_color_reduce_settings"]):
328
+ transformation_method = gr.Dropdown(
329
+ ["original",
330
+ "lab_color",
331
+ "lab_lightness",
332
+ "lab_red_green",
333
+ "lab_blue_yellow",
334
+ "brightness",
335
+ "hsv_color",
336
+ "hue",
337
+ "saturation",
338
+ "value"], label=names["transformation_method"], value="original")
339
+ max_colors = gr.Slider(minimum=1, maximum=30, step=1, value=16, label=names["max_colors"])
340
+ distance_strategy = gr.Dropdown(["euclidean", "cosine"], label=names["distance_strategy"], value="euclidean")
341
+ rgb_palette = gr.Textbox(placeholder=names["rgb_palette_place_holder"], label=names["rgb_palette"])
342
+ color_list = gr.Textbox(label=names["color_list"])
343
+
344
+ # parameters for tile settings
345
+ with gr.Accordion(open=False, label=names["tile_settings"]):
346
+ painting_width = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['painting_width'], value=4800)
347
+ painting_height = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['painting_height'], value=7000)
348
+ tile_preset = gr.Dropdown(["large(50*120)", "small(25*60)", "customize"], label=names["tile_preset"], value="large(50*120)")
349
+ tile_width = gr.Number(precision=0, minimum=1, maximum=999, step=1, label=names['tile_width'], value=50)
350
+ tile_height = gr.Number(precision=0, minimum=1, maximum=999, step=1, label=names['tile_height'], value=120)
351
+ tile_height_tip = gr.Number(precision=0, minimum=1, maximum=999, step=1, label=names['tile_height_tip'], value=25)
352
+ gap_horizontal = gr.Number(precision=0, minimum=0, maximum=99, step=1, label=names['gap_horizontal'], value=2)
353
+ gap_vertical = gr.Number(precision=0, minimum=0, maximum=99, step=1, label=names['gap_vertical'], value=1)
354
+ gap_color = gr.Dropdown(['black', 'white'], label=names["gap_color"], value='black')
355
+ color_aggregate_type = gr.Dropdown(['average', 'marjority'], label=names["color_aggregate_type"], value='marjority')
356
+
357
+ # parameters for boarder settings
358
+ with gr.Accordion(open=False, label=names["boader_settings"]):
359
+ left_boarder_dist = gr.Number(precision=0, minimum=-999, maximum=999, step=1, label=names['left_boarder_dist'], value=0)
360
+ right_boarder_dist = gr.Number(precision=0, minimum=-999, maximum=999, step=1, label=names['right_boarder_dist'], value=-11)
361
+ top_boarder_dist = gr.Number(precision=0, minimum=-999, maximum=999, step=1, label=names['top_boarder_dist'], value=-30)
362
+ bottom_boarder_dist = gr.Number(precision=0, minimum=-999, maximum=999, step=1, label=names['bottom_boarder_dist'], value=0)
363
+ line_width = gr.Number(precision=0, minimum=1, maximum=99, step=1, label=names['line_width'], value=10)
364
+ line_color = gr.Dropdown(['red', 'blue'], label=names["line_color"], value='red')
365
+ margin_left = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['margin_left'], value=350)
366
+ margin_right = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['margin_right'], value=350)
367
+ margin_top = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['margin_top'], value=700)
368
+ margin_bottom = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['margin_bottom'], value=200)
369
+
370
+ # parameters for header text settings
371
+ with gr.Accordion(open=False, label=names["header_text_settings"]):
372
+ header_text_width = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_width'], value=3000)
373
+ header_text_height = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_height'], value=400)
374
+ header_text_bg_color = gr.Dropdown(['black', 'white'], label=names["text_bg_color"], value='white')
375
+ header_font_name = gr.Dropdown(['华文细黑.ttf'], label=names["font_name"], value='华文细黑.ttf')
376
+ header_font_size = gr.Slider(minimum=1, maximum=999, step=1, value=80, label=names["font_size"])
377
+ header_text_color = gr.Dropdown(['black', 'white'], label=names["text_color"], value='black')
378
+ header_text_x_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_x_pos'], value=0)
379
+ header_text_y_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_y_pos'], value=0)
380
+ header_text_value = (
381
+ "户尺寸: W4800*H7000MM (产品尺寸:W4800*H7066MM)\n"
382
+ "格:33*80MM单颗粒密拼带边小鳞光\n"
383
+ "工艺:手工注浆\n"
384
+ "备注:颜色参��(16个色)"
385
+ )
386
+ header_text = gr.Text(value=header_text_value, label=names["text"])
387
+ header_onto_position_x = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_x'], value=300)
388
+ header_onto_position_y = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_y'], value=150)
389
+
390
+ # parameters for grid settings
391
+ with gr.Accordion(open=False, label=names["grid_settings"]):
392
+ hex_width = gr.Number(precision=0, minimum=1, maximum=999, step=1, label=names['hex_width'], value=100)
393
+ hex_height = gr.Number(precision=0, minimum=1, maximum=999, step=1, label=names['hex_height'], value=150)
394
+ tip_height = gr.Number(precision=0, minimum=1, maximum=999, step=1, label=names['tip_height'], value=50)
395
+ grid_bg_color = gr.Dropdown(['black', 'white'], label=names["grid_bg_color"], value='white')
396
+ grid_onto_position_x = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_x'], value=2000)
397
+ grid_onto_position_y = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_y'], value=300)
398
+
399
+ # parameters for top edge text settings
400
+ with gr.Accordion(open=False, label=names["top_edge_text_setting"]):
401
+ top_text_width = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_width'], value=300)
402
+ top_text_height = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_height'], value=80)
403
+ top_text_bg_color = gr.Dropdown(['black', 'white'], label=names["text_bg_color"], value='white')
404
+ top_font_name = gr.Dropdown(['华文细黑.ttf'], label=names["font_name"], value='华文细黑.ttf')
405
+ top_font_size = gr.Slider(minimum=1, maximum=999, step=1, value=60, label=names["font_size"])
406
+ top_text_color = gr.Dropdown(['black', 'white'], label=names["text_color"], value='black')
407
+ top_text_x_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_x_pos'], value=0)
408
+ top_text_y_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_y_pos'], value=0)
409
+ top_text_value = (
410
+ "4800mm"
411
+ )
412
+ top_text = gr.Text(value=top_text_value, label=names["text"])
413
+ top_onto_position_x = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_x'], value=2500)
414
+ top_onto_position_y = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_y'], value=600)
415
+
416
+ # parameters for left edge text settings
417
+ with gr.Accordion(open=False, label=names["left_edge_text_setting"]):
418
+ left_text_width = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_width'], value=300)
419
+ left_text_height = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_height'], value=80)
420
+ left_text_bg_color = gr.Dropdown(['black', 'white'], label=names["text_bg_color"], value='white')
421
+ left_font_name = gr.Dropdown(['华文细黑.ttf'], label=names["font_name"], value='华文细黑.ttf')
422
+ left_font_size = gr.Slider(minimum=1, maximum=999, step=1, value=60, label=names["font_size"])
423
+ left_text_color = gr.Dropdown(['black', 'white'], label=names["text_color"], value='black')
424
+ left_text_x_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_x_pos'], value=0)
425
+ left_text_y_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_y_pos'], value=0)
426
+ left_text_value = (
427
+ "7066mm"
428
+ )
429
+ left_text = gr.Text(value=left_text_value, label=names["text"])
430
+ left_onto_position_x = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_x'], value=200)
431
+ left_onto_position_y = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_y'], value=3500)
432
+
433
+ # parameters for right edge text settings
434
+ with gr.Accordion(open=False, label=names["right_edge_text_setting"]):
435
+ right_text_width = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_width'], value=300)
436
+ right_text_height = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_height'], value=80)
437
+ right_text_bg_color = gr.Dropdown(['black', 'white'], label=names["text_bg_color"], value='white')
438
+ right_font_name = gr.Dropdown(['华文细黑.ttf'], label=names["font_name"], value='华文细黑.ttf')
439
+ right_font_size = gr.Slider(minimum=1, maximum=999, step=1, value=60, label=names["font_size"])
440
+ right_text_color = gr.Dropdown(['black', 'white'], label=names["text_color"], value='black')
441
+ right_text_x_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_x_pos'], value=0)
442
+ right_text_y_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_y_pos'], value=0)
443
+ right_text_value = (
444
+ "7000mm"
445
+ )
446
+ right_text = gr.Text(value=right_text_value, label=names["text"])
447
+ right_onto_position_x = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_x'], value=5200)
448
+ right_onto_position_y = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_y'], value=3500)
449
+
450
+ # parameters for bot edge text settings
451
+ with gr.Accordion(open=False, label=names["bot_edge_text_setting"]):
452
+ bot_text_width = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_width'], value=300)
453
+ bot_text_height = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label=names['text_height'], value=80)
454
+ bot_text_bg_color = gr.Dropdown(['black', 'white'], label=names["text_bg_color"], value='white')
455
+ bot_font_name = gr.Dropdown(['华文细黑.ttf'], label=names["font_name"], value='华文细黑.ttf')
456
+ bot_font_size = gr.Slider(minimum=1, maximum=999, step=1, value=60, label=names["font_size"])
457
+ bot_text_color = gr.Dropdown(['black', 'white'], label=names["text_color"], value='black')
458
+ bot_text_x_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_x_pos'], value=0)
459
+ bot_text_y_pos = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['text_y_pos'], value=0)
460
+ bot_text_value = (
461
+ "4800mm"
462
+ )
463
+ bot_text = gr.Text(value=bot_text_value, label=names["text"])
464
+ bot_onto_position_x = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_x'], value=2500)
465
+ bot_onto_position_y = gr.Number(precision=0, minimum=0, maximum=9999, step=1, label=names['onto_position_y'], value=7700)
466
+
467
+ with gr.Column():
468
+ final_image = gr.Image(type="pil", label=names['output_image'])
469
+ gr.Examples(
470
+ examples=get_image_examples(),
471
+ inputs=[raw_image],
472
+ fn=None,
473
+ outputs=[final_image],
474
+ cache_examples=False,
475
+ label=names['examples']
476
+ )
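+ # Run button: each .then() step below reads final_image and writes it back, forming the pipeline
+ # color reduction -> tiling -> border -> header text -> color grid -> top/left/right/bottom edge labels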
477
+ btn.click(
478
+ fn=pixel_palette_image,
479
+ inputs=[
480
+ raw_image,
481
+ transformation_method,
482
+ rgb_palette,
483
+ max_colors,
484
+ distance_strategy
485
+ ],
486
+ outputs=[final_image, color_list]
487
+ ).then(
488
+ fn=map_tiles,
489
+ inputs=[
490
+ final_image,
491
+ painting_width,
492
+ painting_height,
493
+ tile_preset,
494
+ tile_width,
495
+ tile_height,
496
+ tile_height_tip,
497
+ gap_horizontal,
498
+ gap_vertical,
499
+ gap_color,
500
+ color_aggregate_type
501
+ ],
502
+ outputs=final_image
503
+ ).then(
504
+ fn=apply_boarder,
505
+ inputs=[
506
+ final_image,
507
+ left_boarder_dist,
508
+ right_boarder_dist,
509
+ top_boarder_dist,
510
+ bottom_boarder_dist,
511
+ line_width,
512
+ line_color,
513
+ margin_left,
514
+ margin_right,
515
+ margin_top,
516
+ margin_bottom
517
+ ],
518
+ outputs=final_image
519
+ ).then(
520
+ fn=create_header_text,
521
+ inputs=[
522
+ final_image,
523
+ header_text_width,
524
+ header_text_height,
525
+ header_text_bg_color,
526
+ header_font_name,
527
+ header_font_size,
528
+ header_text_color,
529
+ header_text_x_pos,
530
+ header_text_y_pos,
531
+ header_text,
532
+ header_onto_position_x,
533
+ header_onto_position_y
534
+ ],
535
+ outputs=final_image
536
+ ).then(
537
+ fn=create_color_grid,
538
+ inputs=[
539
+ final_image,
540
+ color_list,
541
+ hex_width,
542
+ hex_height,
543
+ tip_height,
544
+ grid_bg_color,
545
+ grid_onto_position_x,
546
+ grid_onto_position_y
547
+ ],
548
+ outputs=final_image
549
+ ).then(
550
+ fn=top_edge_label,
551
+ inputs=[
552
+ final_image,
553
+ top_text_width,
554
+ top_text_height,
555
+ top_text_bg_color,
556
+ top_font_name,
557
+ top_font_size,
558
+ top_text_color,
559
+ top_text_x_pos,
560
+ top_text_y_pos,
561
+ top_text,
562
+ top_onto_position_x,
563
+ top_onto_position_y
564
+ ],
565
+ outputs=final_image
566
+ ).then(
567
+ fn=left_edge_label,
568
+ inputs=[
569
+ final_image,
570
+ left_text_width,
571
+ left_text_height,
572
+ left_text_bg_color,
573
+ left_font_name,
574
+ left_font_size,
575
+ left_text_color,
576
+ left_text_x_pos,
577
+ left_text_y_pos,
578
+ left_text,
579
+ left_onto_position_x,
580
+ left_onto_position_y
581
+ ],
582
+ outputs=final_image
583
+ ).then(
584
+ fn=right_edge_label,
585
+ inputs=[
586
+ final_image,
587
+ right_text_width,
588
+ right_text_height,
589
+ right_text_bg_color,
590
+ right_font_name,
591
+ right_font_size,
592
+ right_text_color,
593
+ right_text_x_pos,
594
+ right_text_y_pos,
595
+ right_text,
596
+ right_onto_position_x,
597
+ right_onto_position_y
598
+ ],
599
+ outputs=final_image
600
+ ).then(
601
+ fn=bot_edge_label,
602
+ inputs=[
603
+ final_image,
604
+ bot_text_width,
605
+ bot_text_height,
606
+ bot_text_bg_color,
607
+ bot_font_name,
608
+ bot_font_size,
609
+ bot_text_color,
610
+ bot_text_x_pos,
611
+ bot_text_y_pos,
612
+ bot_text,
613
+ bot_onto_position_x,
614
+ bot_onto_position_y
615
+ ],
616
+ outputs=final_image
617
+ )
618
+
619
+ demo.launch(share=False)
gitignore ADDED
@@ -0,0 +1,136 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .envrc
107
+ .venv
108
+ venv/
109
+ ENV/
110
+ env/
111
+ env.bak/
112
+ venv.bak/
113
+
114
+ # Spyder project settings
115
+ .spyderproject
116
+ .spyproject
117
+
118
+ # Rope project settings
119
+ .ropeproject
120
+
121
+ # mkdocs documentation
122
+ /site
123
+
124
+ # mypy
125
+ .mypy_cache/
126
+ .dmypy.json
127
+ dmypy.json
128
+
129
+ # Pyre type checker
130
+ .pyre/
131
+
132
+ # pytype static type analyzer
133
+ .pytype/
134
+
135
+ # Cython debug symbols
136
+ cython_debug/
images/dam_01.png ADDED
images/dam_02.png ADDED
images/dam_03.png ADDED
images/dam_04.png ADDED
images/dam_05.png ADDED
images/dam_06.png ADDED
requirements.txt ADDED
@@ -0,0 +1,9 @@
1
+ torch==2.2.1
2
+ torchvision==0.17.1
3
+ langchain==0.1.11
4
+ faiss-cpu==1.8.0
5
+ pillow==10.2.0
6
+ chardet==5.2.0
7
+ scikit-image==0.22.0
8
+ scikit-learn==1.4.1.post1
9
+ emoji==2.10.1
setting_names.py ADDED
@@ -0,0 +1,236 @@
1
+ names_mapping = {
2
+ 'page_header': {
3
+ "en": "Upload an image or select an example to start",
4
+ 'cn': "上传图片或者从右侧选择案列以开始"
5
+ },
6
+
7
+ 'raw_image': {
8
+ "en": "Raw image",
9
+ 'cn': "原图"
10
+ },
11
+
12
+
13
+ "image_color_reduce_settings": {
14
+ "en": "Image Color Reduce Settings",
15
+ 'cn': "图片颜色数量设置",
16
+ },
17
+ 'input_image': {
18
+ "en": "Input Image",
19
+ 'cn': "输入图片",
20
+ },
21
+ "transformation_method": {
22
+ "en": "Transformation Method",
23
+ 'cn': "转换方法",
24
+ },
25
+ "distance_strategy": {
26
+ "en": "Distance Strategy",
27
+ 'cn': "距离方法",
28
+ },
29
+ "max_colors": {
30
+ "en": "Max Colors",
31
+ 'cn': "最大颜色数量",
32
+ },
33
+ "rgb_palette": {
34
+ "en": "RBG Palette",
35
+ 'cn': "RBG色板",
36
+ },
37
+ "rgb_palette_place_holder": {
38
+ "en": "RGBs for mapping, if empty, it will find colors automatically with max colors",
39
+ 'cn': "目标RBG参数,可不填,则默认自动找到最大颜色数量的颜色",
40
+ },
41
+ "color_list": {
42
+ "en": "RBG list of reduced color image",
43
+ 'cn': "保留的RGB颜色",
44
+ },
45
+
46
+ "tile_settings": {
47
+ "en": "Tile Settings",
48
+ 'cn': "砖块设置",
49
+ },
50
+ "painting_width": {
51
+ "en": "Final Image Width",
52
+ 'cn': "墙面宽度",
53
+ },
54
+ "painting_height": {
55
+ "en": "Final Image Height",
56
+ 'cn': "墙面高度",
57
+ },
58
+ "tile_preset": {
59
+ "en": "Tile Preset",
60
+ 'cn': "砖块预设",
61
+ },
62
+ "tile_width": {
63
+ "en": "Tile Width",
64
+ 'cn': "砖块宽度",
65
+ },
66
+ "tile_height": {
67
+ "en": "Tile Height(Total)",
68
+ 'cn': "砖块高度(包括尖部)",
69
+ },
70
+ "tile_height_tip": {
71
+ "en": "Tile Tip Height",
72
+ 'cn': "尖部高度",
73
+ },
74
+ "gap_horizontal": {
75
+ "en": "Horizontal Gap",
76
+ 'cn': "横向缝隙",
77
+ },
78
+ "gap_vertical": {
79
+ "en": "Veritcal Gap",
80
+ 'cn': "纵向缝隙",
81
+ },
82
+ "gap_color": {
83
+ "en": "Gap Color",
84
+ 'cn': "缝隙颜色",
85
+ },
86
+ "color_aggregate_type": {
87
+ "en": "Aggregate Color Method",
88
+ 'cn': "颜色聚敛方法",
89
+ },
90
+
91
+ "boader_settings": {
92
+ "en": "Boarder Settings",
93
+ 'cn': "边框设定",
94
+ },
95
+ 'left_boarder_dist': {
96
+ "en": "Left Line Distance",
97
+ 'cn': "左边框距离",
98
+ },
99
+ 'right_boarder_dist': {
100
+ "en": "Right Line Distance",
101
+ 'cn': "右边框距离",
102
+ },
103
+ 'top_boarder_dist': {
104
+ "en": "Top Line Distance",
105
+ 'cn': "上边框距离",
106
+ },
107
+ 'bottom_boarder_dist': {
108
+ "en": "Bottom Line Distance",
109
+ 'cn': "下边框距离",
110
+ },
111
+ 'line_width': {
112
+ "en": "Line Width",
113
+ 'cn': "边框粗细",
114
+ },
115
+ 'line_color': {
116
+ "en": "Line Color",
117
+ 'cn': "边框颜色",
118
+ },
119
+ 'margin_left': {
120
+ "en": "Left Margin",
121
+ 'cn': "左留白",
122
+ },
123
+ 'margin_right': {
124
+ "en": "Right Margin",
125
+ 'cn': "右留白",
126
+ },
127
+ 'margin_top': {
128
+ "en": "Top Margin",
129
+ 'cn': "上留白",
130
+ },
131
+ 'margin_bottom': {
132
+ "en": "Bottom Margin",
133
+ 'cn': "下留白",
134
+ },
135
+
136
+ "header_text_settings": {
137
+ "en": "Header Text Settings",
138
+ 'cn': "主要文字说明",
139
+ },
140
+ 'text_width': {
141
+ "en": 'Text Image Width',
142
+ 'cn': "文字图片宽度",
143
+ },
144
+ 'text_height': {
145
+ "en": 'Text Image Height',
146
+ 'cn': "文字图片高度",
147
+ },
148
+ 'text_bg_color': {
149
+ "en": 'Text Image BG Color',
150
+ 'cn': "文字图片背景颜色",
151
+ },
152
+ 'font_name': {
153
+ "en": 'Font Type',
154
+ 'cn': "字体",
155
+ },
156
+ 'font_size': {
157
+ "en": 'Font Size',
158
+ 'cn': "字号",
159
+ },
160
+ 'text_color': {
161
+ "en": 'Font Color',
162
+ 'cn': "字体颜色",
163
+ },
164
+ 'text_x_pos': {
165
+ "en": 'Text Pos in Text Image X',
166
+ 'cn': "文字在图片位置X",
167
+ },
168
+ 'text_y_pos': {
169
+ "en": 'Text Pos in Text Image Y',
170
+ 'cn': "文字在图片位置Y",
171
+ },
172
+ 'text': {
173
+ "en": 'Text Value',
174
+ 'cn': "文字",
175
+ },
176
+ "onto_position_x": {
177
+ "en": "Paste to X",
178
+ 'cn': "文字图片在原图位置X",
179
+ },
180
+ "onto_position_y": {
181
+ "en": "Paste to Y",
182
+ 'cn': "文字图片在原图位置Y",
183
+ },
184
+
185
+ "grid_settings": {
186
+ "en": "Color Grid Settings",
187
+ 'cn': "颜色网格设定",
188
+ },
189
+ 'hex_width': {
190
+ "en": 'Hex Width',
191
+ 'cn': "六边形宽度",
192
+ },
193
+ 'hex_height': {
194
+ "en": 'Hex Height(Middle Only)',
195
+ 'cn': "六边形高度(仅腰部)",
196
+ },
197
+ 'tip_height': {
198
+ "en": 'Hex Tip Height',
199
+ 'cn': "六边形尖部高度",
200
+ },
201
+ 'grid_bg_color': {
202
+ "en": 'Hex Grid BG Color',
203
+ 'cn': "六边形图片背景颜色",
204
+ },
205
+
206
+ "top_edge_text_setting": {
207
+ "en": "Top Label",
208
+ 'cn': "上边缘标记",
209
+ },
210
+ "left_edge_text_setting": {
211
+ "en": "Left Label",
212
+ 'cn': "左边缘标记",
213
+ },
214
+ "right_edge_text_setting": {
215
+ "en": "Right Label",
216
+ 'cn': "右边缘标记",
217
+ },
218
+ "bot_edge_text_setting": {
219
+ "en": "Bottom Label",
220
+ 'cn': "下边缘标记",
221
+ },
222
+
223
+ "output_image": {
224
+ "en": "Output Image",
225
+ 'cn': "输入图片",
226
+ },
227
+
228
+ "run": {
229
+ "en": "Run",
230
+ 'cn': "运行",
231
+ },
232
+ "examples": {
233
+ "en": "Examples",
234
+ 'cn': "案例",
235
+ },
236
+ }
utils/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ from .pixel_color_palette import PixelColorPalette
2
+ from .utils_haxagon_pixel import Tile, Painting, ColorSelection
3
+ from .utils_image_boarder import ImageBoarder
4
+ from .utils_image_text import ImageText
5
+ from .image_combine import image_paste_methods
6
+ from .utils_haxagon_color_grid import HexagonGrid
7
+ from .utils_image_color_distribution import ColorPalette
8
+ from .utils_image_modification import ImageManipulation
9
+
10
+ __all__ = [
11
+ "PixelColorPalette",
12
+ "Tile",
13
+ "Painting",
14
+ "ColorSelection",
15
+ "ImageBoarder",
16
+ "ImageText",
17
+ "image_paste_methods",
18
+ "HexagonGrid",
19
+ "ColorPalette",
20
+ "ImageManipulation"
21
+ ]
utils/image_combine.py ADDED
@@ -0,0 +1,299 @@
1
+ from PIL import Image
2
+ from torch import Tensor
3
+ from torchvision import transforms
4
+ import torch
5
+
6
+ from typing import Union
7
+
8
+
9
+ class image_paste_methods:
10
+ def __init__(
11
+ self,
12
+ pil_image1: Image.Image,
13
+ pil_image2: Image.Image,
14
+ resize_mode: str,
15
+ x: int,
16
+ y: int,
17
+ image1_mask: Union[Tensor, None],
18
+ image2_mask: Union[Tensor, None]
19
+ ) -> None:
20
+ self.image1 = pil_image1
21
+ self.image1_alpha = None
22
+
23
+ self.image2 = pil_image2
24
+ self.image2_alpha = None
25
+
26
+ if image1_mask is not None:
27
+ self.image1_alpha = self.transform_tensor_to_pil(image1_mask.unsqueeze(-1))
28
+
29
+ if image2_mask is not None:
30
+ self.image2_alpha = self.transform_tensor_to_pil(image2_mask.unsqueeze(-1))
31
+
32
+ self.resize_mode = resize_mode
33
+ self.x = x
34
+ self.y = y
35
+
36
+ @staticmethod
37
+ def transform_pil_to_tensor(pil_image: Image) -> Tensor:
38
+ """Conver pil image into tensor."""
39
+ # transform PIL into tensor
40
+ transform = transforms.Compose([
41
+ transforms.ToTensor(), # Convert the image to a tensor
42
+ ])
43
+ image_tensor = transform(pil_image)
44
+ image_tensor = image_tensor.unsqueeze(0)
45
+ image_tensor = image_tensor.permute(0, 2, 3, 1)
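+ # keep the tensor in channels-last [B, H, W, C] layout, the shape transform_tensor_to_pil expects back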
46
+ return image_tensor
47
+
48
+ @staticmethod
49
+ def transform_tensor_to_pil(image_tensor: Tensor) -> Image:
50
+ """Convert tensor image into PIL."""
51
+ # Ensure the tensor is in the correct shape (C, H, W)
52
+ if image_tensor.dim() == 4:
53
+ # Assuming the tensor shape is [B, H, W, C],
54
+ # permute it to [B, C, H, W] then remove the batch dimension
55
+ image_tensor = image_tensor.permute(0, 3, 1, 2).squeeze(0)
56
+ elif image_tensor.dim() == 3:
57
+ # If the tensor is [H, W, C], permute it to [C, H, W]
58
+ image_tensor = image_tensor.permute(2, 0, 1)
59
+
60
+ to_pil_image = transforms.ToPILImage()
61
+ pil_image = to_pil_image(image_tensor)
62
+ return pil_image
63
+
64
+ @staticmethod
65
+ def paste_by_alpha(base_image: Image,
66
+ source_image: Image,
67
+ source_image_alpha: Image,
68
+ x: int,
69
+ y: int
70
+ ) -> Image:
71
+ if source_image_alpha:
72
+ base_image.paste(source_image, (x, y), source_image_alpha)
73
+ else:
74
+ base_image.paste(source_image, (x, y))
75
+ return base_image
76
+
77
+ @staticmethod
78
+ def update_mask_with_content(base_mask: Image, source_image: Image, x: int, y: int) -> Image:
79
+ """Update the base mask by clearing areas where the source image has actual content."""
80
+ source_image_data = source_image.convert("RGBA").getdata()
81
+ base_mask_data = list(base_mask.getdata())  # getdata() returns a read-only sequence, so copy it before editing pixels
82
+
83
+ for row in range(source_image.height):
84
+ for col in range(source_image.width):
85
+ src_pixel_index = row * source_image.width + col
86
+ base_pixel_index = (row + y) * base_mask.width + (col + x)
87
+
88
+ # Check if the source image pixel is not transparent
89
+ if source_image_data[src_pixel_index][3] > 0:
90
+ # Update the base mask to remove the mask at this pixel
91
+ base_mask_data[base_pixel_index] = 0
92
+
93
+ base_mask.putdata(list(base_mask_data))
94
+ return base_mask
95
+
96
+ @staticmethod
97
+ def combine_masks(base_mask: Image, source_mask: Image, x: int, y: int) -> Image:
98
+ """Combine two masks, only for onto paste type."""
99
+ # Determine the dimensions required to fit both masks
100
+ source_left = x
101
+ source_top = y
102
+ source_right = x + source_mask.width
103
+ source_bot = y + source_mask.height
104
+
105
+ base_left = 0
106
+ base_top = 0
107
+ base_right = base_mask.width
108
+ base_bot = base_mask.height
109
+
110
+ minx = min([source_left, base_left])
111
+ maxx = max([source_right, base_right])
112
+
113
+ miny = min([source_top, base_top])
114
+ maxy = max([source_bot, base_bot])
115
+
116
+ new_width = maxx - minx
117
+ new_height = maxy - miny
118
+
119
+ combined_mask = Image.new('L', (new_width, new_height), 0)
120
+ if source_left < base_left:
121
+ if source_top < base_top:
122
+ combined_mask.paste(base_mask, (-x, -y))
123
+ combined_mask.paste(source_mask, (0, 0), source_mask)
124
+ else:
125
+ combined_mask.paste(base_mask, (-x, 0))
126
+ combined_mask.paste(source_mask, (0, y), source_mask)
127
+
128
+ else:
129
+ if source_top < base_top:
130
+ combined_mask.paste(base_mask, (0, -y))
131
+ combined_mask.paste(source_mask, (x, 0), source_mask)
132
+ else:
133
+ combined_mask.paste(base_mask, (0, 0))
134
+ combined_mask.paste(source_mask, (x, y), source_mask)
135
+ return image_paste_methods.transform_pil_to_tensor(combined_mask).squeeze(-1)
136
+
137
+
138
+ def paste_horizontal(self) -> Image:
139
+ """Paste 2 images horizontally with optional resizing."""
140
+ image1 = self.image1
141
+ image2 = self.image2
142
+
143
+ width1, height1 = image1.size
144
+ width2, height2 = image2.size
145
+
146
+ if self.resize_mode != "no-resize":
147
+ if self.resize_mode == 'to-big':
148
+ new_height = max(height1, height2)
149
+ elif self.resize_mode == 'to-small':
150
+ new_height = min(height1, height2)
151
+ image1 = image1.resize((int(width1 * new_height / height1), new_height))
152
+ image2 = image2.resize((int(width2 * new_height / height2), new_height))
153
+ if self.image1_alpha:
154
+ self.image1_alpha = self.image1_alpha.resize((int(width1 * new_height / height1), new_height))
155
+ if self.image2_alpha:
156
+ self.image2_alpha = self.image2_alpha.resize((int(width2 * new_height / height2), new_height))
157
+
158
+ if self.image1_alpha is not None:
159
+ if self.image2_alpha is not None:
160
+ new_mask = self.combine_masks(
161
+ self.image2_alpha,
162
+ self.image1_alpha,
163
+ 0-image1.width,
164
+ 0)
165
+ else:
166
+ new_mask = self.combine_masks(
167
+ self.transform_tensor_to_pil(torch.zeros(1, image2.height, image2.width).unsqueeze(-1)),
168
+ self.image1_alpha,
169
+ 0-image1.width,
170
+ 0)
171
+ else:
172
+ if self.image2_alpha is not None:
173
+ new_mask = self.combine_masks(
174
+ self.image2_alpha,
175
+ self.transform_tensor_to_pil(torch.zeros(1, image1.height, image1.width).unsqueeze(-1)),
176
+ 0-image1.width,
177
+ 0)
178
+ else:
179
+ new_mask = self.combine_masks(
180
+ self.transform_tensor_to_pil(torch.zeros(1, image2.height, image2.width).unsqueeze(-1)),
181
+ self.transform_tensor_to_pil(torch.zeros(1, image1.height, image1.width).unsqueeze(-1)),
182
+ 0-image1.width,
183
+ 0)
184
+
185
+ result_width = image1.width + image2.width
186
+ result_height = max(image1.height, image2.height)
187
+ result_image = Image.new('RGB', (result_width, result_height))
188
+
189
+ result_image = self.paste_by_alpha(result_image, image1, self.image1_alpha, 0, 0)
190
+ result_image = self.paste_by_alpha(result_image, image2, self.image2_alpha, image1.width, 0)
191
+
192
+ return result_image, new_mask
193
+
194
+
195
+ def paste_vertical(self) -> Image:
196
+ """Paste 2 images vertically with optional resizing."""
197
+ image1 = self.image1
198
+ image2 = self.image2
199
+
200
+ width1, height1 = image1.size
201
+ width2, height2 = image2.size
202
+
203
+
204
+ if self.resize_mode != "no-resize":
205
+ if self.resize_mode == 'to-big':
206
+ new_width = max(width1, width2)
207
+ elif self.resize_mode == 'to-small':
208
+ new_width = min(width1, width2)
209
+ image1 = image1.resize((new_width, int(height1 * new_width / width1)))
210
+ image2 = image2.resize((new_width, int(height2 * new_width / width2)))
211
+
212
+ if self.image1_alpha:
213
+ self.image1_alpha = self.image1_alpha.resize((new_width, int(height1 * new_width / width1)))
214
+ if self.image2_alpha:
215
+ self.image2_alpha = self.image2_alpha.resize((new_width, int(height2 * new_width / width2)))
216
+
217
+
218
+ if self.image1_alpha is not None:
219
+ if self.image2_alpha is not None:
220
+ new_mask = self.combine_masks(
221
+ self.image2_alpha,
222
+ self.image1_alpha,
223
+ 0,
224
+ 0-image1.height)
225
+ else:
226
+ new_mask = self.combine_masks(
227
+ self.transform_tensor_to_pil(torch.zeros(1, image2.height, image2.width).unsqueeze(-1)),
228
+ self.image1_alpha,
229
+ 0,
230
+ 0-image1.height)
231
+
232
+ else:
233
+ if self.image2_alpha is not None:
234
+ new_mask = self.combine_masks(
235
+ self.image2_alpha,
236
+ self.transform_tensor_to_pil(torch.zeros(1, image1.height, image1.width).unsqueeze(-1)),
237
+ 0,
238
+ 0-image1.height)
239
+ else:
240
+ new_mask = self.combine_masks(
241
+ self.transform_tensor_to_pil(torch.zeros(1, image2.height, image2.width).unsqueeze(-1)),
242
+ self.transform_tensor_to_pil(torch.zeros(1, image1.height, image1.width).unsqueeze(-1)),
243
+ 0,
244
+ 0-image1.height)
245
+
246
+ result_width = max(image1.width, image2.width)
247
+ result_height = image1.height + image2.height
248
+ result_image = Image.new('RGB', (result_width, result_height))
249
+
250
+ result_image = self.paste_by_alpha(result_image, image1, self.image1_alpha, 0, 0)
251
+ result_image = self.paste_by_alpha(result_image, image2, self.image2_alpha, 0, image1.height)
252
+ return result_image, new_mask
253
+
254
+
255
+ def paste_onto(self) -> Image:
256
+ """Paste image1 onto image2 at position (x, y), creating a new image that fits both."""
257
+ image1 = self.image1
258
+ image2 = self.image2
259
+
260
+ width1, height1 = image1.size
261
+ width2, height2 = image2.size
262
+ # Calculate new image size
263
+ new_width = max(width1 + max(0, self.x), width2 + max(0, -self.x))
264
+ new_height = max(height1 + max(0, self.y), height2 + max(0, -self.y))
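+ # x/y may be negative, so the canvas is sized to hold both images; image1 ends up offset by (x, y) relative to image2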
265
+ # create new empty image, that can contain both 1 and 2
266
+ new_image = Image.new('RGBA', (new_width, new_height), (255, 255, 255, 0))
267
+ # calculate location
268
+ image1_position = (max(0, self.x), max(0, self.y))
269
+ image2_position = (max(0, -self.x), max(0, -self.y))
270
+
271
+
272
+ # paste both onto new empty image
273
+ new_image = self.paste_by_alpha(new_image, image2, self.image2_alpha, image2_position[0], image2_position[1])
274
+ new_image = self.paste_by_alpha(new_image, image1, self.image1_alpha, image1_position[0], image1_position[1])
275
+
276
+ if self.image1_alpha is not None:
277
+ if self.image2_alpha is not None:
278
+ new_mask = self.combine_masks(self.image2_alpha,
279
+ self.image1_alpha,
280
+ self.x,
281
+ self.y)
282
+ else:
283
+ new_mask = self.combine_masks(self.transform_tensor_to_pil(torch.zeros(1, height2, width2).unsqueeze(-1)),
284
+ self.image1_alpha,
285
+ self.x,
286
+ self.y)
287
+
288
+ else:
289
+ if self.image2_alpha is not None:
290
+ new_mask = self.combine_masks(self.image2_alpha,
291
+ self.transform_tensor_to_pil(torch.zeros(1, height1, width1).unsqueeze(-1)),
292
+ self.x,
293
+ self.y)
294
+ else:
295
+ new_mask = self.combine_masks(self.transform_tensor_to_pil(torch.zeros(1, height2, width2).unsqueeze(-1)),
296
+ self.transform_tensor_to_pil(torch.zeros(1, height1, width1).unsqueeze(-1)),
297
+ self.x,
298
+ self.y)
299
+ return new_image, new_mask
utils/pixel_color_palette.py ADDED
@@ -0,0 +1,220 @@
1
+ import ast
2
+ import colorsys
3
+ from PIL import Image
4
+ import numpy as np
5
+ from typing import List, Tuple, Union, Callable, Any
6
+ from tqdm import tqdm
7
+ from sklearn.cluster import KMeans
8
+ from langchain_community.vectorstores.utils import DistanceStrategy
9
+ from torch import Tensor
10
+ import torchvision.transforms as transforms
11
+ from skimage import color
12
+ from .utils_vector_store import FaissVectorStore, MockEmbedding
13
+ import os
14
+ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
15
+
16
+ class RGBTransformation:
17
+ @staticmethod
18
+ def original(rgbs: List[Tuple]) -> List[Tuple]:
19
+ return rgbs
20
+
21
+ @staticmethod
22
+ def lab_color(rgbs: List[Tuple]) -> List[Tuple]:
23
+ """Convert rgb to lab color."""
24
+ return color.rgb2lab(np.array(rgbs)/255).tolist()
25
+
26
+ @staticmethod
27
+ def lab_lightness(rgbs: List[Tuple]) -> List[Tuple]:
28
+ """Convert rgb to lightness color."""
29
+ return color.rgb2lab(np.array(rgbs)/255)[:,0].reshape(-1, 1).tolist()
30
+
31
+ @staticmethod
32
+ def lab_red_green(rgbs: List[Tuple]) -> List[Tuple]:
33
+ """Convert rgb to red green color."""
34
+ return color.rgb2lab(np.array(rgbs)/255)[:,1].reshape(-1, 1).tolist()
35
+
36
+ @staticmethod
37
+ def lab_blue_yellow(rgbs: List[Tuple]) -> List[Tuple]:
38
+ """Convert rgb to blue yellow color."""
39
+ return color.rgb2lab(np.array(rgbs)/255)[:,2].reshape(-1, 1).tolist()
40
+
41
+ @staticmethod
42
+ def brightness(rgbs: List[Tuple]) -> List[Tuple]:
43
+ """Convert rgb to brightness"""
44
+ return (np.array(rgbs)/3).sum(axis=1).reshape(-1, 1).tolist()
45
+
46
+
47
+ @staticmethod
48
+ def hsv_color(rgbs: List[Tuple]) -> List[Tuple]:
49
+ """Convert rgb to hsv"""
50
+ return [list(colorsys.rgb_to_hsv(*rgb)) for rgb in rgbs]
51
+
52
+
53
+ @staticmethod
54
+ def hue(rgbs: List[Tuple]) -> List[Tuple]:
55
+ """Convert rgb to hue"""
56
+ return [[colorsys.rgb_to_hsv(*rgb)[0]] for rgb in rgbs]
57
+
58
+
59
+ @staticmethod
60
+ def saturation(rgbs: List[Tuple]) -> List[Tuple]:
61
+ """Convert rgb to saturation"""
62
+ return [[colorsys.rgb_to_hsv(*rgb)[1]] for rgb in rgbs]
63
+
64
+
65
+ @staticmethod
66
+ def value(rgbs: List[Tuple]) -> List[Tuple]:
67
+ """Convert rgb to value"""
68
+ return [[colorsys.rgb_to_hsv(*rgb)[2]] for rgb in rgbs]
69
+
70
+
71
+ def call_method(self, method_name: str) -> Callable[..., Any]:
72
+ """Get the corresponding method by name"""
73
+ method = getattr(self, method_name, lambda: "method not exist.")
74
+ return method
75
+
76
+
77
+ class PixelColorPalette:
78
+ def __init__(
79
+ self,
80
+ pil_image: Image.Image,
81
+ transformation_method: str,
82
+ rgb_palette: Union[str, None],
83
+ max_colors: int, # ignored when rgb_palette is provided
84
+ distance_strategy: str
85
+ ) -> None:
86
+ """Initialize translation."""
87
+ self.pil_image = pil_image
88
+ self.pixels = self.pil_image.load()
89
+ self.transformation_method = transformation_method
90
+ self.rgb_palette = rgb_palette
91
+ self.max_colors = max_colors
92
+ self.transformation_function = RGBTransformation().call_method(transformation_method)
93
+ if distance_strategy == "cosine":
94
+ distance_strategy =DistanceStrategy.COSINE
95
+ elif distance_strategy == "euclidean":
96
+ distance_strategy =DistanceStrategy.EUCLIDEAN_DISTANCE
97
+
98
+ self.width = self.pil_image.size[0]
99
+ self.height = self.pil_image.size[1]
100
+
101
+ self.original_rgbs = []
102
+ for px in range(self.width):
103
+ for py in range(self.height):
104
+ self.original_rgbs.append(self.pixels[px, py])
105
+ self.color_maps = self.transformation_function(self.original_rgbs)
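+ # pixels are flattened column by column (x outer, y inner); the translate_image_* methods index with ix * height + iy to match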
106
+
107
+ # if a palette is specified, build the vector db from the palette colors
108
+ if self.rgb_palette:
109
+ all_rgb_doc = []
110
+ metadatas = []
111
+ self.color_palette = self.parse_string_to_tuples(self.rgb_palette)
112
+ self.color_space = self.transformation_function(self.color_palette)
113
+ for (r, g, b), colormap in zip(self.color_palette, self.color_space):
114
+ all_rgb_doc.append(('', colormap))
115
+ metadatas.append({'r': r, "g": g, "b": b})
116
+ self.faiss_db = FaissVectorStore(all_rgb_doc,
117
+ metadatas,
118
+ MockEmbedding(),
119
+ distance_strategy=distance_strategy)
120
+ else:
121
+ all_rgb_doc = []
122
+ metadatas = []
123
+ for (r, g, b), colormap in zip(self.original_rgbs, self.color_maps):
124
+ all_rgb_doc.append(('', colormap))
125
+ metadatas.append({'r': r, "g": g, "b": b})
126
+ self.faiss_db = FaissVectorStore(all_rgb_doc,
127
+ metadatas,
128
+ MockEmbedding(),
129
+ distance_strategy=distance_strategy)
130
+ self.clustering_color_group()
131
+
132
+ @staticmethod
133
+ def parse_string_to_tuples(s: str) -> List[Tuple]:
134
+ # Remove spaces to avoid issues with ast.literal_eval
135
+ s_cleaned = ''.join(s.split())
136
+ # Wrap the string in brackets to make it a list literal
137
+ s_list_literal = f'[{s_cleaned}]'
138
+ try:
139
+ # Safely evaluate the string as a Python literal
140
+ result = ast.literal_eval(s_list_literal)
141
+ except ValueError:
142
+ # Handle the case where the string could not be parsed
143
+ result = []
144
+ return result
145
+
146
+
147
+ @staticmethod
148
+ def transform_pil_to_tensor(pil_image: Image) -> Tensor:
149
+ """Conver pil image into tensor."""
150
+ # transform PIL into tensor
151
+ transform = transforms.Compose([
152
+ transforms.ToTensor(), # Convert the image to a tensor
153
+ ])
154
+ image_tensor = transform(pil_image)
155
+ image_tensor = image_tensor.unsqueeze(0)
156
+ image_tensor = image_tensor.permute(0, 2, 3, 1)
157
+ return image_tensor
158
+
159
+ @staticmethod
160
+ def transform_tensor_to_pil(image_tensor: Tensor) -> Image:
161
+ """Convert tensor image into PIL."""
162
+ # Ensure the tensor is in the correct shape (C, H, W)
163
+ if image_tensor.dim() == 4:
164
+ # Assuming the tensor shape is [B, H, W, C],
165
+ # permute it to [B, C, H, W] then remove the batch dimension
166
+ image_tensor = image_tensor.permute(0, 3, 1, 2).squeeze(0)
167
+ elif image_tensor.dim() == 3:
168
+ # If the tensor is [H, W, C], permute it to [C, H, W]
169
+ image_tensor = image_tensor.permute(2, 0, 1)
170
+
171
+ to_pil_image = transforms.ToPILImage()
172
+ pil_image = to_pil_image(image_tensor)
173
+ return pil_image
174
+
175
+ def clustering_color_group(self) -> None:
176
+ """Get the color groups using Kmean"""
177
+ X = np.array(self.color_maps)
178
+ self.kmeans = KMeans(n_clusters=self.max_colors, random_state=0, n_init=self.max_colors).fit(X)
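+ # snap each cluster centre to the nearest colour stored in the FAISS index, so the final palette only contains real image colours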
179
+ center_rgbs = []
180
+ for center in self.kmeans.cluster_centers_.tolist():
181
+ rgb_metadata = self.faiss_db.db.similarity_search_by_vector(center)[0].metadata
182
+ given_r = rgb_metadata['r']
183
+ given_g = rgb_metadata['g']
184
+ given_b = rgb_metadata['b']
185
+ center_rgbs.append([given_r, given_g, given_b])
186
+ self.center_rgbs = np.array(center_rgbs)
187
+
188
+ def translate_image_vector_db(self) -> Image.Image:
189
+ """Get the image with new mapped colors."""
190
+ processed_img_final = Image.new('RGB', (self.width, self.height), (255, 255, 255))
191
+ for ix, px in enumerate(tqdm(range(self.width))):
192
+ for iy, py in enumerate(range(self.height)):
193
+ color_transformed = self.color_maps[ix * self.height + iy]
194
+ rgb_metadata = self.faiss_db.db.similarity_search_by_vector(color_transformed)[0].metadata
195
+ given_r = rgb_metadata['r']
196
+ given_g = rgb_metadata['g']
197
+ given_b = rgb_metadata['b']
198
+ processed_img_final.putpixel((px, py), (given_r, given_g, given_b))
199
+ return processed_img_final
200
+
201
+
202
+ def translate_image_kmean(self) -> Image.Image:
203
+ """Get the image with new clustered colors."""
204
+ mapped_rgb_colors = self.center_rgbs[self.kmeans.labels_]
205
+ processed_img_final = Image.new('RGB', (self.width, self.height), (255, 255, 255))
206
+ for ix, px in enumerate(tqdm(range(self.width))):
207
+ for iy, py in enumerate(range(self.height)):
208
+ rgb_transformed = mapped_rgb_colors[ix * self.height + iy]
209
+ given_r = rgb_transformed[0]
210
+ given_g = rgb_transformed[1]
211
+ given_b = rgb_transformed[2]
212
+ processed_img_final.putpixel((px, py), (given_r, given_g, given_b))
213
+ return processed_img_final
214
+
215
+ def translate(self) -> Image.Image:
216
+ """Return the final translated image."""
217
+ if self.rgb_palette:
218
+ return self.translate_image_vector_db()
219
+ else:
220
+ return self.translate_image_kmean()
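
Note (not part of the commit): a minimal sketch of the palette-string format that parse_string_to_tuples above expects. The standalone helper simply mirrors that static method; the example values are illustrative. When a palette string is supplied, translate() maps every pixel to its nearest palette color through the FAISS store; without one, it falls back to the K-means cluster centers.

import ast

def parse_palette(s: str) -> list:
    s_cleaned = ''.join(s.split())             # drop all whitespace
    return ast.literal_eval(f'[{s_cleaned}]')  # "(r, g, b), (r, g, b)" -> [(r, g, b), ...]

print(parse_palette("(255, 0, 0), (0, 128, 255)"))
# [(255, 0, 0), (0, 128, 255)]
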
utils/utils_haxagon_color_grid.py ADDED
@@ -0,0 +1,68 @@
1
+ import ast
2
+ from PIL import Image, ImageDraw
3
+ from typing import Tuple, List
4
+
5
+
6
+ class HexagonGrid:
7
+ def __init__(self, colors: str, width: int, hex_height: int, tip_height: int, bg_color: str):
8
+ self.width = width
9
+ self.hex_height = hex_height
10
+ self.tip_height = tip_height
11
+ self.colors = self.parse_string_to_tuples(colors)
12
+ self.bg_color = bg_color
13
+
14
+ @staticmethod
15
+ def parse_string_to_tuples(s: str) -> List[Tuple]:
16
+ # Remove spaces to avoid issues with ast.literal_eval
17
+ s_cleaned = ''.join(s.split())
18
+ # Wrap the string in brackets to make it a list literal
19
+ s_list_literal = f'[{s_cleaned}]'
20
+ try:
21
+ # Safely evaluate the string as a Python literal
22
+ result = ast.literal_eval(s_list_literal)
23
+ except ValueError:
24
+ # Handle the case where the string could not be parsed
25
+ result = []
26
+ return result
27
+
28
+
29
+ @staticmethod
30
+ def calculate_hexagon_points(center: Tuple, width: int, hex_height: int, tip_height: int) -> List[Tuple]:
31
+ """Calculate the points of a hexagon based on the width, total height, and tip height."""
32
+ # These points define the hexagon starting from the top-middle point, clockwise.
33
+ return [
34
+ (center[0], center[1] - hex_height / 2 - tip_height), # Top
35
+ (center[0] + width/2, center[1] - hex_height / 2), # Top right
36
+ (center[0] + width/2, center[1] + hex_height / 2), # Bottom right
37
+ (center[0], center[1] + hex_height / 2 + tip_height), # Bottom
38
+ (center[0] - width/2, center[1] + hex_height / 2), # Bottom left
39
+ (center[0] - width/2, center[1] - hex_height / 2), # Top left
40
+ ]
41
+
42
+ def create_color_hexagon_grid(self) -> Image.Image:
43
+ """
44
+ Create an image of a row of colored hexagons with specified width, height, and tip height.
45
+
46
+ :param colors: List of RGB color tuples for the hexagons.
47
+ :param width: The full width of each hexagon.
48
+ :param hex_height: The height of the straight-sided middle section of each hexagon (tips excluded).
49
+ :param tip_height: The height of the top and bottom triangles (tips) of each hexagon.
50
+ :return: A PIL image with the hexagon grid.
51
+ """
52
+ # Calculate the necessary image width and height
53
+ image_width = self.width * len(self.colors)
54
+ image_height = self.hex_height + 2 * self.tip_height
55
+
56
+ # Create a new image
57
+ image = Image.new('RGB', (image_width, image_height), self.bg_color)
58
+ draw = ImageDraw.Draw(image)
59
+
60
+ # Draw each hexagon
61
+ for i, color in enumerate(self.colors):
62
+ # Calculate the center of the hexagon
63
+ center_x = self.width * i + self.width / 2
64
+ center = (center_x, self.hex_height / 2 + self.tip_height)
65
+ # Get the points for the hexagon
66
+ hexagon_points = self.calculate_hexagon_points(center, self.width, self.hex_height, self.tip_height)
67
+ draw.polygon(hexagon_points, fill=color)
68
+ return image
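
Note (not part of the commit): a minimal usage sketch for HexagonGrid. The import path assumes utils/ is importable as a package, and every numeric value is illustrative.

from utils.utils_haxagon_color_grid import HexagonGrid

grid = HexagonGrid(
    colors="(255, 0, 0), (0, 128, 255), (30, 30, 30)",  # palette given as a string of RGB tuples
    width=60,         # full width of each hexagon
    hex_height=52,    # height of the straight-sided middle section
    tip_height=15,    # height of the top and bottom tips
    bg_color="white",
)
grid.create_color_hexagon_grid().save("palette_hexagons.png")
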
utils/utils_haxagon_pixel.py ADDED
@@ -0,0 +1,313 @@
1
+ import ast
2
+ from skimage import color
3
+ from langchain_community.vectorstores import FAISS
4
+ from langchain.embeddings.base import Embeddings
5
+ from typing import List, Tuple, Dict, Union
6
+ from PIL import Image
7
+ import numpy as np
8
+ from tqdm import tqdm
9
+ from langchain_community.vectorstores.utils import DistanceStrategy
10
+ from torch import Tensor
11
+ import torchvision.transforms as transforms
12
+ from collections import Counter
13
+ import os
14
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
15
+ from typing import (
16
+ Dict,
17
+ Iterable,
18
+ List,
19
+ Tuple,
20
+ )
21
+
22
+
23
+ class FaissVectorStore:
24
+ """In memory faiss vector store."""
25
+
26
+ def __init__(
27
+ self,
28
+ documents: List[Tuple[str, List[float]]],
29
+ metadatas: List[Dict],
30
+ embedding: Embeddings,
31
+ distance_strategy: DistanceStrategy,
32
+ **kwargs
33
+ ):
34
+ """
35
+ Initialize the in-memory FAISS data retriever.
36
+ Documents can also be text (and corresponding embedding). In
37
+ this case, we will use the provided vectors, but metadata needs
38
+ to be provided as a list of dictionaries.
39
+ """
40
+
41
+ self.db = self.__create_vectorstore_from_embeddings(
42
+ text_embeddings=documents,
43
+ embedding=embedding,
44
+ metadatas=metadatas,
45
+ distance_strategy=distance_strategy
46
+ )
47
+
48
+ def __create_vectorstore_from_embeddings(
49
+ self,
50
+ text_embeddings: Iterable[Tuple[str, List[float]]],
51
+ embedding: Embeddings,
52
+ metadatas: Iterable[dict],
53
+ distance_strategy: DistanceStrategy
54
+ ) -> FAISS:
55
+ """Create vector store with text and embeddings."""
56
+ db = FAISS.from_embeddings(text_embeddings=text_embeddings,
57
+ embedding=embedding,
58
+ metadatas=metadatas,
59
+ distance_strategy=distance_strategy)
60
+ return db
61
+
62
+ class MockEmbedding(Embeddings):
63
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
64
+ """Embed search docs."""
65
+
66
+ def embed_query(self, text: str) -> List[float]:
67
+ """Embed query text."""
68
+
69
+
70
+ class Tile:
71
+ """Data definition of a tile."""
72
+ def __init__(
73
+ self,
74
+ x_center: Union[int, float],
75
+ y_center: Union[int, float],
76
+ width: Union[int, float],
77
+ height_total: Union[int, float],
78
+ height_tip: Union[int, float]
79
+ ):
80
+ self.width = width
81
+ self.half_width = width / 2
82
+ self.height_tip = height_tip
83
+ self.height_middle = (height_total - height_tip * 2)
84
+ self.x_center = x_center
85
+ self.y_center = y_center
86
+
87
+ self.x_left = x_center - self.half_width
88
+ self.x_right = x_center + self.half_width
89
+ self.y_top = y_center + height_total/2
90
+ self.y_bottom = y_center - height_total/2
91
+ self.y_box_top = self.y_top - self.height_tip
92
+ self.y_box_bottom = self.y_bottom + self.height_tip
93
+
94
+
95
+ def contains_coord(self, x, y):
96
+ """
97
+ Given the coordinates of a 2-dimensional point (x, y),
98
+ determine if the point lies in the tile.
99
+ This approach checks conditions on the Cartesian coordinates; a geometric approach based on angles would also work.
100
+ """
101
+ diagonal_slope = self.height_tip / self.half_width
102
+
103
+ return (
104
+ self.x_left <= x <= self.x_right
105
+ ) and (
106
+ self.y_bottom <= y <= self.y_top
107
+ ) and not (
108
+ (
109
+ y > self.y_box_top and (
110
+ (x < self.x_center and y > self.y_box_top + diagonal_slope * (x - self.x_left)) or
111
+ (x > self.x_center and y > self.y_box_top - diagonal_slope * (x - self.x_right))
112
+ )
113
+ ) or (
114
+ y < self.y_box_bottom and (
115
+ (x < self.x_center and y < self.y_box_bottom - diagonal_slope * (x - self.x_left)) or
116
+ (x > self.x_center and y < self.y_box_bottom + diagonal_slope * (x - self.x_right))
117
+ )
118
+ )
119
+ )
120
+
121
+
122
+ def coords_contained(self):
123
+ """
124
+ Produce a list of all (x, y) coordinates where x and y are integers, and (x, y) is contained within the tile
125
+ """
126
+ coords = []
127
+ for x in range(int(self.x_left), int(self.x_right)+1):
128
+ for y in range(int(self.y_bottom), int(self.y_top)+1):
129
+ if self.contains_coord(x, y):
130
+ coords.append((x, y))
131
+ return coords
132
+
133
+
134
+ class Painting:
135
+ def __init__(
136
+ self,
137
+ width: int,
138
+ height: int,
139
+ tile_width: int,
140
+ tile_height: int,
141
+ tile_height_tip: int,
142
+ gap_horizontal: int,
143
+ gap_vertical: int,
144
+ gap_color: str,
145
+ ):
146
+ self.width = width
147
+ self.height = height
148
+ self.tile_width = tile_width
149
+ self.tile_height = tile_height
150
+ self.tile_height_tip = tile_height_tip
151
+ self.gap_horizontal = gap_horizontal
152
+ self.gap_vertical = gap_vertical
153
+ self.gap_color = gap_color
154
+
155
+ self.tile_height_middle = tile_height - tile_height_tip * 2
156
+
157
+ x_centers, idx_alternating = [0, tile_width/2 + gap_horizontal / 2], 1
158
+ y_center = tile_height/2 # the y-center of the top row of tiles
159
+
160
+ self.tiles = []
161
+ self.tile_pixels = []
162
+
163
+ while y_center-tile_height/2 < self.height:
164
+ idx_alternating = (idx_alternating + 1) % 2
165
+ x_center = x_centers[idx_alternating]
166
+
167
+ while x_center-tile_width/2 < self.width:
168
+ tile = Tile(x_center, y_center, tile_width, tile_height, tile_height_tip)
169
+ self.tiles.append(tile)
170
+ self.tile_pixels.append(tile.coords_contained())
171
+ x_center += (self.tile_width + self.gap_horizontal)
172
+
173
+ y_center += (self.tile_height_middle + self.tile_height_tip + self.gap_vertical)
174
+
175
+
176
+ class Translation:
177
+ def __init__(
178
+ self,
179
+ painting: Painting,
180
+ colors: str,
181
+ lab_space: str,
182
+ distance_strategy: str
183
+ ) -> None:
184
+ """Initialize translation."""
185
+ self.painting = painting
186
+ self.colors = self.parse_string_to_tuples(colors)
187
+ self.lab_space = lab_space
188
+ all_rgb_doc = []
189
+ metadatas = []
190
+ for (r, g, b) in self.colors:
191
+ all_rgb_doc.append(('', list(self.rgb2lab((r, g, b)))))
192
+ metadatas.append({'r': r, "g": g, "b": b})
193
+
194
+ if distance_strategy.startswith('c'):
195
+ distance_strategy = DistanceStrategy.COSINE
196
+ else:
197
+ distance_strategy = DistanceStrategy.EUCLIDEAN_DISTANCE
198
+ self.faiss_db = FaissVectorStore(all_rgb_doc,
199
+ metadatas,
200
+ MockEmbedding(),
201
+ distance_strategy=distance_strategy)
202
+
203
+ def rgb2lab(self, rgb: Tuple) -> Tuple:
204
+ """Convert rgb to lab color."""
205
+ if self.lab_space == 'true':
206
+ r, g, b = rgb
207
+ lab = color.rgb2lab((r/255, g/255, b/255))
208
+ return tuple(lab)
209
+ else:
210
+ return rgb
211
+
212
+ @staticmethod
213
+ def parse_string_to_tuples(s: str) -> List[Tuple]:
214
+ # Remove spaces to avoid issues with ast.literal_eval
215
+ s_cleaned = ''.join(s.split())
216
+ # Wrap the string in brackets to make it a list literal
217
+ s_list_literal = f'[{s_cleaned}]'
218
+ try:
219
+ # Safely evaluate the string as a Python literal
220
+ result = ast.literal_eval(s_list_literal)
221
+ except ValueError:
222
+ # Handle the case where the string could not be parsed
223
+ result = []
224
+ return result
225
+
226
+ def transform_pil_to_tensor(self, pil_image: Image) -> Tensor:
227
+ """Convert a PIL image into a tensor."""
228
+ # transform PIL into tensor
229
+ transform = transforms.Compose([
230
+ transforms.ToTensor(), # Convert the image to a tensor
231
+ ])
232
+ image_tensor = transform(pil_image)
233
+ image_tensor = image_tensor.unsqueeze(0)
234
+ image_tensor = image_tensor.permute(0, 2, 3, 1)
235
+ return image_tensor
236
+
237
+ def translate_image_tensor(self, image_tensor: Tensor) -> Tensor:
238
+ image_tensor = image_tensor.squeeze(0).permute(2, 0, 1)
239
+ # Convert the tensor to a PIL Image
240
+ to_pil_image = transforms.ToPILImage()
241
+ original_image = to_pil_image(image_tensor)
242
+
243
+ new_size = (self.painting.width, self.painting.height)
244
+ upscaled_image = original_image.resize(new_size)
245
+ pixels = upscaled_image.load()
246
+
247
+ if self.painting.gap_color == "white":
248
+ new_img_bg = (255, 255, 255)
249
+ else:  # treat any non-white gap color as black
250
+ new_img_bg = (0, 0, 0)
251
+
252
+ processed_img_final = Image.new('RGB', new_size, new_img_bg)
253
+
254
+ for corresponding_coords in tqdm(self.painting.tile_pixels):
255
+ coords_pixels = np.array([pixels[x, y] for x, y in corresponding_coords if 0 <= x < self.painting.width and 0 <= y < self.painting.height])
256
+ if len(coords_pixels.shape) == 1:
257
+ continue
258
+ r, g, b = int(np.mean(coords_pixels[:, 0])), int(np.mean(coords_pixels[:, 1])), int(np.mean(coords_pixels[:, 2]))
259
+ rgb_metadata = self.faiss_db.db.similarity_search_by_vector(list(self.rgb2lab((r, g, b))))[0].metadata
260
+ given_r = rgb_metadata['r']
261
+ given_g = rgb_metadata['g']
262
+ given_b = rgb_metadata['b']
263
+
264
+ for x, y in corresponding_coords:
265
+ if 0 <= x < self.painting.width and 0 <= y < self.painting.height:
266
+ processed_img_final.putpixel((x, y), (given_r, given_g, given_b))
267
+
268
+ return self.transform_pil_to_tensor(processed_img_final)
269
+
270
+
271
+
272
+ class ColorSelection:
273
+ def __init__(
274
+ self,
275
+ pil_image: Image.Image,
276
+ selection_type: str,
277
+ painting: Painting,
278
+ ) -> None:
279
+ """Initialize the color selection."""
280
+ self.selection_type = selection_type
281
+ self.pil_image = pil_image
282
+ self.painting = painting
283
+
284
+ @staticmethod
285
+ def rgb_vote(rgbs: List[Tuple]) -> Tuple:
286
+ """Select the most common rgb of a tile."""
287
+ counter = Counter(rgbs)
288
+ most_common_tuple, count = counter.most_common(1)[0]
289
+ return most_common_tuple
290
+
291
+ def translate_image_tensor(self) -> Image.Image:
292
+ new_size = (self.painting.width, self.painting.height)
293
+ upscaled_image = self.pil_image.resize(new_size)
294
+ pixels = upscaled_image.load()
295
+
296
+ if self.painting.gap_color == "white":
297
+ new_img_bg = (255, 255, 255)
298
+ else:  # treat any non-white gap color as black
299
+ new_img_bg = (0, 0, 0)
300
+ processed_img_final = Image.new('RGB', new_size, new_img_bg)
301
+ for corresponding_coords in self.painting.tile_pixels:
302
+ coords_pixels = [pixels[x, y] for x, y in corresponding_coords if 0 <= x < self.painting.width and 0 <= y < self.painting.height]
303
+ if not coords_pixels:
304
+ continue
305
+ if self.selection_type.startswith("average"):
306
+ coords_pixels = np.array(coords_pixels)
307
+ r, g, b = int(np.mean(coords_pixels[:, 0])), int(np.mean(coords_pixels[:, 1])), int(np.mean(coords_pixels[:, 2]))
308
+ else:  # any selection other than "average" falls back to the majority-vote color
309
+ r, g, b = self.rgb_vote(coords_pixels)
310
+ for x, y in corresponding_coords:
311
+ if 0 <= x < self.painting.width and 0 <= y < self.painting.height:
312
+ processed_img_final.putpixel((x, y), (r, g, b))
313
+ return processed_img_final
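
Note (not part of the commit): a minimal sketch tying Painting and ColorSelection together. The import path and every numeric value are assumptions for illustration; "photo.jpg" is a placeholder input.

from PIL import Image
from utils.utils_haxagon_pixel import Painting, ColorSelection

painting = Painting(
    width=800, height=600,                       # output canvas size
    tile_width=20, tile_height=24, tile_height_tip=6,
    gap_horizontal=2, gap_vertical=2,
    gap_color="white",
)
source = Image.open("photo.jpg").convert("RGB")
selector = ColorSelection(source, selection_type="average", painting=painting)
tiled = selector.translate_image_tensor()        # returns a PIL image despite the name
tiled.save("tiled.png")
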
utils/utils_image_boarder.py ADDED
@@ -0,0 +1,41 @@
1
+ from PIL import Image, ImageDraw
2
+ from typing import Tuple, Dict
3
+
4
+ class ImageBoarder:
5
+ def __init__(self, image: Image.Image, line_distances: Dict, line_width: int, line_color: str, margin_widths: Dict) -> None:
6
+ self.image = image
7
+ self.line_distances = line_distances
8
+ self.line_width = line_width
9
+ self.line_color = line_color
10
+ self.margin_widths = margin_widths
11
+
12
+
13
+ def add_custom_lines(self) -> Image.Image:
14
+ """
15
+ Extend the canvas, draw the four border lines at the configured distances, and add the outer margins.
16
+ """
17
+ # Calculate new image dimensions considering line distances
18
+ left_extend = max(self.line_distances['left'], 0)
19
+ right_extend = max(self.line_distances['right'], 0)
20
+ top_extend = max(self.line_distances['top'], 0)
21
+ bottom_extend = max(self.line_distances['bottom'], 0)
22
+
23
+ new_width = self.image.width + left_extend + right_extend
24
+ new_height = self.image.height + top_extend + bottom_extend
25
+
26
+ # Create a new image with additional space for lines
27
+ new_image = Image.new("RGB", (new_width, new_height), "white")
28
+ new_image.paste(self.image, (left_extend, top_extend))
29
+
30
+ draw = ImageDraw.Draw(new_image)
31
+
32
+ # Draw lines
33
+ draw.line([(left_extend - self.line_distances['left'], top_extend), (left_extend - self.line_distances['left'], new_height - bottom_extend)], fill=self.line_color, width=self.line_width)
34
+ draw.line([(new_width - right_extend + self.line_distances['right'], top_extend), (new_width - right_extend + self.line_distances['right'], new_height - bottom_extend)], fill=self.line_color, width=self.line_width)
35
+ draw.line([(left_extend, top_extend - self.line_distances['top']), (new_width - right_extend, top_extend - self.line_distances['top'])], fill=self.line_color, width=self.line_width)
36
+ draw.line([(left_extend, new_height - bottom_extend + self.line_distances['bottom']), (new_width - right_extend, new_height - bottom_extend + self.line_distances['bottom'])], fill=self.line_color, width=self.line_width)
37
+
38
+ # Add margins
39
+ final_image = Image.new("RGB", (new_image.width + self.margin_widths['left'] + self.margin_widths['right'], new_image.height + self.margin_widths['top'] + self.margin_widths['bottom']), "white")
40
+ final_image.paste(new_image, (self.margin_widths['left'], self.margin_widths['top']))
41
+ return final_image
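
Note (not part of the commit): a minimal usage sketch for ImageBoarder; the distances, widths, and file names are illustrative assumptions.

from PIL import Image
from utils.utils_image_boarder import ImageBoarder

bordered = ImageBoarder(
    image=Image.open("tiled.png"),
    line_distances={"left": 10, "right": 10, "top": 10, "bottom": 10},
    line_width=3,
    line_color="red",
    margin_widths={"left": 40, "right": 40, "top": 40, "bottom": 40},
).add_custom_lines()
bordered.save("bordered.png")
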
utils/utils_image_color_distribution.py ADDED
@@ -0,0 +1,76 @@
1
+ import math
2
+ from PIL import Image, ImageDraw
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ from collections import Counter
6
+ import io
7
+
8
+ class ColorPalette:
9
+ def __init__(
10
+ self,
11
+ pil_image: Image.Image,
12
+ color_block_size: int,
13
+ ) -> None:
14
+ """Initialize the color palette summary."""
15
+ self.pil_image = pil_image
16
+ self.pixels = self.pil_image.load()
17
+ self.color_block_size = color_block_size
18
+ self.image_rgbs = []
19
+ self.rgbs = []
20
+
21
+ self.width = self.pil_image.size[0]
22
+ self.height = self.pil_image.size[1]
23
+ for px in range(self.width):
24
+ for py in range(self.height):
25
+ self.rgbs.append(self.pixels[px, py])
26
+
27
+ color_counts = Counter(self.rgbs)
28
+ sorted_colors = sorted(color_counts.items(), key=lambda x: x[1], reverse=True)
29
+ self.rgb_list, self.counts = zip(*sorted_colors)
30
+ self.rgbs_string = f"{self.rgb_list}".strip()[1: -1]  # [1:-1] drops the outer parentheses so the result matches the palette-string format
31
+ self.total_colors = len(set(self.rgbs))
32
+
33
+ def create_color_blocks(self) -> Image.Image:
34
+ """
35
+ Create an image consisting of square color blocks for each RGB value in the list,
36
+ arranged in an n by m grid.
37
+ Returns an Image object containing all the color blocks arranged in that grid.
38
+ """
39
+ # Calculate the number of rows and columns to make the grid as square as possible
40
+ num_colors = len(self.rgb_list)
41
+ grid_cols = int(math.ceil(math.sqrt(num_colors)))
42
+ grid_rows = int(math.ceil(num_colors / grid_cols))
43
+ # Calculate the overall image size based on the grid
44
+ img_width = self.color_block_size * grid_cols
45
+ img_height = self.color_block_size * grid_rows
46
+ # Create a new blank image
47
+ img = Image.new('RGB', (img_width, img_height), 'white')
48
+ # Create a draw object to draw on the image
49
+ draw = ImageDraw.Draw(img)
50
+ # Loop through each RGB value and draw a color block in the grid
51
+ for i, color in enumerate(self.rgb_list):
52
+ row = i // grid_cols
53
+ col = i % grid_cols
54
+ # Calculate the top left corner of the current block
55
+ top_left = (col * self.color_block_size, row * self.color_block_size)
56
+ # Calculate the bottom right corner of the current block
57
+ bottom_right = ((col + 1) * self.color_block_size, (row + 1) * self.color_block_size)
58
+ # Draw the rectangle with the current color
59
+ draw.rectangle([top_left, bottom_right], fill=color)
60
61
+ return img
62
+
63
+ def create_color_dist(self) -> Image.Image:
64
+ """Create a distribution plot of colors."""
65
+ plt.figure(figsize=(15, 8))
66
+ bars = plt.bar(range(len(self.counts)), self.counts, color=np.array(self.rgb_list)/255.0, edgecolor='black')
67
+ plt.xlabel('Colors')
68
+ plt.ylabel('Counts')
69
+ plt.title('Color Distribution')
70
+ color_labels = [f'{color}' for color in self.rgb_list]
71
+ plt.xticks(range(len(self.counts)), color_labels, rotation=90, ha='right')
72
+ buf = io.BytesIO()
73
+ plt.savefig(buf, format='png')
74
+ buf.seek(0)
75
+ pil_image = Image.open(buf)
76
+ return pil_image
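
Note (not part of the commit): a minimal usage sketch for ColorPalette; the block size and file names are illustrative assumptions. rgbs_string is already in the palette-string format the other utilities parse.

from PIL import Image
from utils.utils_image_color_distribution import ColorPalette

palette = ColorPalette(Image.open("tiled.png").convert("RGB"), color_block_size=40)
print(palette.total_colors)            # number of distinct RGB values in the image
print(palette.rgbs_string[:80])        # colors sorted by frequency, reusable as a palette string
palette.create_color_blocks().save("palette_blocks.png")
palette.create_color_dist().save("palette_distribution.png")
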
utils/utils_image_modification.py ADDED
@@ -0,0 +1,51 @@
1
+ from PIL import Image
2
+ from torch import Tensor
3
+ from torchvision import transforms
4
+
5
+
6
+ class ImageManipulation:
7
+ def __init__(self, pil_image: Image.Image) -> None:
8
+ self.pil_image = pil_image
9
+
10
+ @staticmethod
11
+ def transform_pil_to_tensor(pil_image: Image) -> Tensor:
12
+ """Convert a PIL image into a tensor."""
13
+ # transform PIL into tensor
14
+ transform = transforms.Compose([
15
+ transforms.ToTensor(), # Convert the image to a tensor
16
+ ])
17
+ image_tensor = transform(pil_image)
18
+ image_tensor = image_tensor.unsqueeze(0)
19
+ image_tensor = image_tensor.permute(0, 2, 3, 1)
20
+ return image_tensor
21
+
22
+ @staticmethod
23
+ def transform_tensor_to_pil(image_tensor: Tensor) -> Image:
24
+ """Convert tensor image into PIL."""
25
+ # Ensure the tensor is in the correct shape (C, H, W)
26
+ if image_tensor.dim() == 4:
27
+ # Assuming the tensor shape is [B, H, W, C],
28
+ # permute it to [B, C, H, W] then remove the batch dimension
29
+ image_tensor = image_tensor.permute(0, 3, 1, 2).squeeze(0)
30
+ elif image_tensor.dim() == 3:
31
+ # If the tensor is [H, W, C], permute it to [C, H, W]
32
+ image_tensor = image_tensor.permute(2, 0, 1)
33
+
34
+ to_pil_image = transforms.ToPILImage()
35
+ pil_image = to_pil_image(image_tensor)
36
+ return pil_image
37
+
38
+ def flip(self, flip_side: str) -> Image:
39
+ if flip_side == "horizontal":
40
+ image_flipped_horizontal = self.pil_image.transpose(Image.FLIP_LEFT_RIGHT)
41
+ return image_flipped_horizontal
42
+
43
+ if flip_side == "vertical":
44
+ image_flipped_vertical = self.pil_image.transpose(Image.FLIP_TOP_BOTTOM)
45
+ return image_flipped_vertical
46
+
47
+ def rotation(self, degree: int) -> Image:
48
+ image_rotated = self.pil_image.rotate(degree, expand=True)
49
+ return image_rotated
50
+
51
+
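
Note (not part of the commit): a minimal usage sketch for ImageManipulation; the file name and angle are illustrative assumptions.

from PIL import Image
from utils.utils_image_modification import ImageManipulation

manip = ImageManipulation(Image.open("photo.jpg"))
flipped = manip.flip("horizontal")      # or "vertical"
rotated = manip.rotation(90)            # counter-clockwise, canvas expanded
tensor = ImageManipulation.transform_pil_to_tensor(rotated)   # shape [1, H, W, C]
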
utils/utils_image_text.py ADDED
@@ -0,0 +1,27 @@
1
+ from PIL import Image, ImageDraw, ImageFont
2
+ from typing import Tuple
3
+ import os
4
+
5
+ class ImageText:
6
+ def __init__(self, width: int, height: int, background_color: str, text: str, font_name: str, font_size: int, text_color: str, text_position: Tuple, abs_path: str) -> None:
7
+ self.width = width
8
+ self.height = height
9
+ self.background_color = background_color
10
+ self.text = text
11
+ self.abs_path = abs_path
12
+ self.font_name = font_name
13
+ self.font_size = font_size
14
+ self.text_color = text_color
15
+ self.text_position = text_position
16
+
17
+ def create_text_image(self) -> Image.Image:
18
+ image = Image.new('RGB', (self.width, self.height), color=self.background_color)
19
+ draw = ImageDraw.Draw(image)
20
+ try:
21
+ font = ImageFont.truetype(os.path.join(self.abs_path, self.font_name), self.font_size)
22
+ except IOError:
23
+ print(f"Font '{self.font_name}' not found. Using default font.")
24
+ font = ImageFont.load_default()
25
+
26
+ draw.text(self.text_position, self.text, fill=self.text_color, font=font)
27
+ return image
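
Note (not part of the commit): a minimal usage sketch for ImageText. The font file, directory, and all layout values are illustrative assumptions; if the font cannot be found, the class falls back to PIL's default font.

from utils.utils_image_text import ImageText

banner = ImageText(
    width=400, height=120,
    background_color="white",
    text="Hello tiles",
    font_name="DejaVuSans.ttf",
    font_size=32,
    text_color="black",
    text_position=(20, 40),
    abs_path="fonts",
).create_text_image()
banner.save("banner.png")
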
utils/utils_vector_store.py ADDED
@@ -0,0 +1,61 @@
1
+ from typing import List, Tuple, Dict
2
+ from langchain_community.vectorstores.utils import DistanceStrategy
3
+ import os
4
+
5
+ from typing import (
6
+ Dict,
7
+ Iterable,
8
+ List,
9
+ Tuple,
10
+ )
11
+
12
+ from langchain_community.vectorstores import FAISS
13
+ from langchain.embeddings.base import Embeddings
14
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
15
+
16
+
17
+ class FaissVectorStore:
18
+ """In memory faiss vector store."""
19
+
20
+ def __init__(
21
+ self,
22
+ documents: List[Tuple[str, List[float]]],
23
+ metadatas: List[Dict],
24
+ embedding: Embeddings,
25
+ distance_strategy: DistanceStrategy,
26
+ **kwargs
27
+ ):
28
+ """
29
+ Initialize the in-memory FAISS data retriever.
30
+ Documents can also be text (and corresponding embedding). In
31
+ this case, we will use the provided vectors, but metadata needs
32
+ to be provided as a list of dictionaries.
33
+ """
34
+
35
+ self.db = self.__create_vectorstore_from_embeddings(
36
+ text_embeddings=documents,
37
+ embedding=embedding,
38
+ metadatas=metadatas,
39
+ distance_strategy=distance_strategy
40
+ )
41
+
42
+ def __create_vectorstore_from_embeddings(
43
+ self,
44
+ text_embeddings: Iterable[Tuple[str, List[float]]],
45
+ embedding: Embeddings,
46
+ metadatas: Iterable[dict],
47
+ distance_strategy: DistanceStrategy
48
+ ) -> FAISS:
49
+ """Create vector store with text and embeddings."""
50
+ db = FAISS.from_embeddings(text_embeddings=text_embeddings,
51
+ embedding=embedding,
52
+ metadatas=metadatas,
53
+ distance_strategy=distance_strategy)
54
+ return db
55
+
56
+ class MockEmbedding(Embeddings):
57
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
58
+ """Embed search docs."""
59
+
60
+ def embed_query(self, text: str) -> List[float]:
61
+ """Embed query text."""
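
Note (not part of the commit): a minimal sketch of how FaissVectorStore is used as a nearest-color lookup elsewhere in this commit. The RGB vectors and metadata below are illustrative; the empty strings stand in for document text, since only the precomputed vectors matter.

from langchain_community.vectorstores.utils import DistanceStrategy
from utils.utils_vector_store import FaissVectorStore, MockEmbedding

docs = [("", [255.0, 0.0, 0.0]), ("", [0.0, 0.0, 255.0])]         # (text, embedding) pairs
metas = [{"r": 255, "g": 0, "b": 0}, {"r": 0, "g": 0, "b": 255}]
store = FaissVectorStore(docs, metas, MockEmbedding(),
                         distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE)
nearest = store.db.similarity_search_by_vector([200.0, 30.0, 30.0])[0]
print(nearest.metadata)   # closest stored color, e.g. {'r': 255, 'g': 0, 'b': 0}
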