Akjava committed on
Commit
0aba763
·
1 Parent(s): 7ce3be6
glibvision/_init_.py ADDED
File without changes
glibvision/bbox_utils.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def expand_bbox(bbox,left=5,top=5,right=5,bottom=5):
    """Grow an (x, y, width, height) bbox by per-side percentages.

    left/right are percentages of the bbox width; top/bottom of its height.
    Returns a new list; the input sequence is not mutated.
    """
    width, height = bbox[2], bbox[3]
    grow_left = width * (float(left) / 100)
    grow_top = height * (float(top) / 100)
    grow_right = width * (float(right) / 100)
    grow_bottom = height * (float(bottom) / 100)
    expanded = list(bbox)
    expanded[0] -= grow_left
    expanded[1] -= grow_top
    expanded[2] += grow_left + grow_right
    expanded[3] += grow_top + grow_bottom
    return expanded
12
+
13
def to_int_bbox(bbox):
    """Truncate the four bbox components (x, y, w, h) to ints."""
    return [int(component) for component in bbox[:4]]
21
+
22
# for dlib rectangle
def to_right_bottom_bbox(bbox):
    """Convert (left, top, width, height) to (left, top, right, bottom)."""
    left, top, width, height = bbox[0], bbox[1], bbox[2], bbox[3]
    return [left, top, left + width, top + height]
glibvision/common_utils.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
def check_exists_files(files,dirs,exit_on_error=True):
    """Verify that the given files and directories exist.

    files/dirs may each be None, a single path string, or a list of paths.
    On the first missing path a message is printed, then the process exits
    with status 1 when exit_on_error is True, otherwise 1 is returned.
    Returns 0 when every listed path exists.
    """
    if files is not None:
        for file in [files] if isinstance(files, str) else files:
            if os.path.isfile(file):
                continue
            print(f"File {file} not found")
            if exit_on_error:
                exit(1)
            return 1
    if dirs is not None:
        for directory in [dirs] if isinstance(dirs, str) else dirs:
            if os.path.isdir(directory):
                continue
            print(f"Dir {directory} not found")
            if exit_on_error:
                exit(1)
            return 1
    return 0
24
+
25
# Extensions treated as images by the listing helpers below.
# NOTE(review): list_digit_images/list_suffix_images currently hard-code ".jpg"
# instead of consulting this list — confirm intended behavior.
image_extensions =[".jpg"]
26
+
27
def add_name_suffix(file_name,suffix,replace_suffix=False):
    """Insert "_suffix" before the file extension.

    A leading underscore is prepended to suffix when missing. When
    replace_suffix is True and the stem already contains an underscore,
    everything from the last underscore onward is replaced instead.
    """
    normalized = suffix if suffix.startswith("_") else "_" + suffix
    stem, ext = os.path.splitext(file_name)
    if replace_suffix:
        underscore_at = stem.rfind("_")
        if underscore_at != -1:
            stem = stem[:underscore_at]
    return f"{stem}{normalized}{ext}"
38
+
39
def replace_extension(file_name,new_extension,suffix=None,replace_suffix=False):
    """Swap the file extension, optionally inserting a name suffix.

    new_extension may be given with or without a leading dot. When suffix
    is truthy the renamed file is routed through add_name_suffix.
    """
    if not new_extension.startswith("."):
        new_extension = "." + new_extension
    stem, _ = os.path.splitext(file_name)
    renamed = stem + new_extension
    if suffix:
        return add_name_suffix(renamed, suffix, replace_suffix)
    return renamed
48
+
49
def list_digit_images(input_dir,sort=True):
    """List ".jpg" files in input_dir whose stem is all digits."""
    matches = []
    for entry in os.listdir(input_dir):
        if not entry.endswith(".jpg"):  # TODO check image
            continue
        stem, _ = os.path.splitext(entry)
        if stem.isdigit():
            matches.append(entry)
    if sort:
        matches.sort()
    return matches
64
def list_suffix_images(input_dir,suffix,is_digit=True,sort=True):
    """List ".jpg" files whose stem ends with suffix.

    When is_digit is True the stem with the suffix removed must be all
    digits for the file to be included.
    """
    matches = []
    for entry in os.listdir(input_dir):
        if not entry.endswith(".jpg"):  # TODO check image
            continue
        stem, _ = os.path.splitext(entry)
        if not stem.endswith(suffix):
            continue
        if is_digit and not stem.replace(suffix, "").isdigit():
            continue
        matches.append(entry)
    if sort:
        matches.sort()
    return matches
81
+
82
+ import time
83
+
84
class ProgressTracker:
    """Tracks progress of a batch job, printing elapsed and estimated remaining time."""

    def __init__(self,key, total_target):
        """
        Args:
            key: label printed with every progress line.
            total_target (int): total number of items to process.
        """
        self.key = key
        self.total_target = total_target
        self.complete_target = 0
        self.start_time = time.time()

    def update(self):
        """Advance progress by one item and print elapsed / remaining minutes."""
        self.complete_target += 1
        consumed_time = time.time() - self.start_time
        if self.complete_target > 0:
            # Linear extrapolation from the average time per completed item.
            remain_time = (consumed_time / self.complete_target) * (self.total_target - self.complete_target)
        else:
            remain_time = 0
        print(f"stepped {self.key} {self.total_target} of {self.complete_target}, consumed {(consumed_time / 60):.1f} min, remain {(remain_time / 60):.1f} min")
112
+
glibvision/cv2_utils.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+
4
+ #2024-11-27 add copy image
5
+ #2024-12-04 plot_text = False,plot line first
6
+ #2024-11-30 copy paste
7
def draw_bbox(image,box,color=(255,0,0),thickness=1):
    """Draw an (x, y, w, h) rectangle outline onto image in place.

    thickness == 0 disables drawing entirely.
    """
    if thickness == 0:
        return
    x0, y0 = int(box[0]), int(box[1])
    x1, y1 = int(box[0] + box[2]), int(box[1] + box[3])
    corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
    cv2.polylines(image, [np.array(corners)], isClosed=True, color=color, thickness=thickness)
18
+
19
+
20
def to_int_points(points):
    """Truncate each point's x/y to int, returning a list of [x, y] lists."""
    return [[int(p[0]), int(p[1])] for p in points]
25
+
26
def draw_text(img, text, point, font_scale=0.5, color=(200, 200, 200), thickness=1):
    """Render text onto img at point using the Hershey simplex font (anti-aliased)."""
    cv2.putText(img, str(text), point, cv2.FONT_HERSHEY_SIMPLEX, font_scale, color, thickness, cv2.LINE_AA)
29
+
30
# Module-level state shared by set_plot_text/plot_points.
plot_text_color = (200, 200, 200)
plot_text_font_scale = 0.5
plot_index = 1  # running per-point label counter; reset by set_plot_text
plot_text = False  # when True, plot_points draws an index label beside each point
34
+
35
def set_plot_text(is_plot,text_font_scale,text_color):
    """Configure point-label drawing for plot_points and reset the label counter."""
    global plot_index,plot_text,plot_text_font_scale,plot_text_color
    plot_text = is_plot
    plot_text_font_scale = text_font_scale
    plot_text_color = text_color
    plot_index = 1
41
+
42
def plot_points(image,points,isClosed=False,circle_size=3,circle_color=(255,0,0),line_size=1,line_color=(0,0,255)):
    """Draw a polyline and/or filled circles for points, in place.

    When the module-level plot_text flag is set (see set_plot_text), each
    circle also gets an incrementing index label.
    """
    global plot_index,plot_text
    int_points = to_int_points(points)
    if line_size > 0:
        cv2.polylines(image, [np.array(int_points)], isClosed=isClosed, color=line_color, thickness=line_size)
    if circle_size > 0:
        for pt in int_points:
            cv2.circle(image, pt, circle_size, circle_color, -1)
            if plot_text:
                draw_text(image, plot_index, pt, plot_text_font_scale, plot_text_color)
                plot_index += 1
54
+
55
+
56
def fill_points(image,points,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
    """Fill a polygon and stroke its closed outline on image, in place."""
    polygon = np.array(points, dtype=np.int32)
    cv2.fillPoly(image, [polygon], fill_color)
    cv2.polylines(image, [polygon], isClosed=True, color=line_color, thickness=thickness)
60
+
61
def get_image_size(cv2_image):
    """Return (height, width) of an image array."""
    height, width = cv2_image.shape[:2]
    return height, width
63
+
64
def get_channel(np_array):
    """Return the channel count: shape[2] for 3-D arrays, otherwise 1."""
    if np_array.ndim == 3:
        return np_array.shape[2]
    return 1
66
+
67
def get_numpy_text(np_array,key=""):
    """Build a one-line debug summary of an array's shape/channels/size."""
    # Channel rule matches get_channel: shape[2] for 3-D arrays, else 1 (inlined here).
    channel = np_array.shape[2] if np_array.ndim == 3 else 1
    return f"{key} shape = {np_array.shape} channel = {channel} ndim = {np_array.ndim} size = {np_array.size}"
70
+
71
+
72
def gray3d_to_2d(grayscale: np.ndarray) -> np.ndarray:
    """Convert a 3-D single-channel grayscale image to 2-D.

    The original placed this docstring after the executable channel check,
    making it a dead string expression rather than a docstring; moved to
    the top (and translated).

    Args:
        grayscale (np.ndarray): grayscale image; 2-D already, or 3-D with one channel.

    Returns:
        np.ndarray: 2-D grayscale image.

    Raises:
        ValueError: if the input has more than one channel (RGB/RGBA).
    """
    channel = get_channel(grayscale)
    if channel != 1:
        raise ValueError(f"color maybe rgb or rgba {get_numpy_text(grayscale)}")
    if grayscale.ndim == 2:
        return grayscale
    return np.squeeze(grayscale)
89
+
90
def blend_rgb_images(image1: np.ndarray, image2: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Alpha-blend two RGB images using a grayscale mask (255 selects image2).

    Args:
        image1 (np.ndarray): base RGB image.
        image2 (np.ndarray): overlay RGB image, same shape as image1.
        mask (np.ndarray): grayscale mask matching the images' height/width.

    Returns:
        np.ndarray: blended uint8 RGB image.

    Raises:
        ValueError: when the input shapes do not line up.
    """
    if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
        raise ValueError("入力画像の形状が一致しません。")
    # Work in float; expand the mask to 3 channels scaled into 0..1 alpha.
    base = image1.astype(float)
    overlay = image2.astype(float)
    alpha = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(float) / 255.0
    blended = (1 - alpha) * base + alpha * overlay
    return blended.astype(np.uint8)
120
+
121
def create_color_image(img,color=(255,255,255)):
    """Return a new image shaped/typed like img, filled entirely with color."""
    canvas = np.zeros_like(img)
    height, width = img.shape[:2]
    cv2.rectangle(canvas, (0, 0), (width, height), color, -1)
    return canvas
127
+
128
def pil_to_bgr_image(image):
    """Convert a PIL RGB/RGBA image to an OpenCV BGR/BGRA uint8 array."""
    np_image = np.array(image, dtype=np.uint8)
    conversion = cv2.COLOR_RGBA2BGRA if np_image.shape[2] == 4 else cv2.COLOR_RGB2BGR
    return cv2.cvtColor(np_image, conversion)
135
+
136
def bgr_to_rgb(np_image):
    """Convert a BGR/BGRA OpenCV array to RGB/RGBA.

    Fix: the original used cv2.COLOR_RBGRA2RGBA, which is not an OpenCV
    constant, so every 4-channel input raised AttributeError. The correct
    code is cv2.COLOR_BGRA2RGBA.

    Args:
        np_image: BGR (3-channel) or BGRA (4-channel) uint8 array.

    Returns:
        np.ndarray: RGB or RGBA array.
    """
    if np_image.shape[2] == 4:
        return cv2.cvtColor(np_image, cv2.COLOR_BGRA2RGBA)
    return cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
142
+
143
def copy_image(img1: np.ndarray, img2: np.ndarray, x: int, y: int) -> None:
    """Paste img2 into img1 at (x, y), clipping to img1's bounds, in place.

    Negative x/y paste a partially visible img2 (its off-canvas part is
    skipped). Both images must be 3-D with the same channel count.

    Fixes over the original: the channel-mismatch message reported
    img2.shape[1] (a width) instead of shape[2]; the negative-y height
    bound used img1.shape[0]-y, which exceeds img1's height and caused
    broadcast errors; the isinstance check ran only after attribute
    access; removed a leftover debug print.

    Raises:
        TypeError: when either argument is not a NumPy array.
        ValueError: on dimension or channel mismatch.
    """
    # Type check first, before any attribute access can raise AttributeError.
    if not isinstance(img1, np.ndarray) or not isinstance(img2, np.ndarray):
        raise TypeError("img1 and img2 must be NumPy arrays.")
    if img1.ndim != 3 or img2.ndim != 3:
        raise ValueError("Both img1 and img2 must be 3-dimensional arrays.")
    if img1.shape[2] != img2.shape[2]:
        raise ValueError(f"img1 and img2 must have the same number of channels. img1 has {img1.shape[2]} channels, but img2 has {img2.shape[2]} channels.")

    if x >= 0:
        offset_x = 0
        w = min(img1.shape[1] - x, img2.shape[1])
    else:
        w = min(img1.shape[1], img2.shape[1] + x)
        offset_x = int(-x)
        x = 0

    if y >= 0:
        offset_y = 0
        h = min(img1.shape[0] - y, img2.shape[0])
    else:
        h = min(img1.shape[0], img2.shape[0] + y)
        offset_y = int(-y)
        y = 0

    x, y, w, h = int(x), int(y), int(w), int(h)
    if w <= 0 or h <= 0:
        return  # no overlap between the images

    # Paste the overlapping part.
    img1[y:y + h, x:x + w] = img2[offset_y:h + offset_y, offset_x:w + offset_x]
178
+
179
def crop(image,bbox):
    """Return the (x, y, width, height) sub-region of image (a view, not a copy)."""
    x, y, w, h = bbox
    return image[y:y + h, x:x + w]
182
# not check safe
def paste(image,replace_image,x,y):
    """Overwrite a region of image at (x, y) with replace_image (no bounds checking)."""
    h, w = replace_image.shape[:2]
    image[y:y + h, x:x + w] = replace_image
glibvision/draw_utils.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DrawUtils
2
+ # not PIL,CV2,Numpy drawing method
3
+ import math
4
+ # 2024-11-29 add calculate_distance
5
def points_to_box(points):
    """Return [x, y, width, height] covering all points.

    Keeps the original accumulator seeds: the max corner starts at 0, so
    all-negative coordinates still produce a box reaching 0.
    """
    x1 = float('inf')
    y1 = float('inf')
    x2 = 0
    y2 = 0
    for point in points:
        x1 = min(x1, point[0])
        x2 = max(x2, point[0])
        y1 = min(y1, point[1])
        y2 = max(y2, point[1])
    return [x1, y1, x2 - x1, y2 - y1]
20
+
21
def box_to_point(box):
    """Return the four corners of an (x, y, w, h) box, clockwise from top-left."""
    x, y, w, h = box[0], box[1], box[2], box[3]
    return [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
28
+
29
def plus_point(base_pt,add_pt):
    """Translate a 2-D point by another point (element-wise add)."""
    x = base_pt[0] + add_pt[0]
    y = base_pt[1] + add_pt[1]
    return [x, y]
31
+
32
def box_to_xy(box):
    """Convert (x, y, w, h) to (x1, y1, x2, y2)."""
    x, y, w, h = box[0], box[1], box[2], box[3]
    return [x, y, x + w, y + h]
34
+
35
def to_int_points(points):
    """Truncate each point's x/y to int, returning a list of [x, y] lists."""
    return [[int(p[0]), int(p[1])] for p in points]
40
+
41
def calculate_distance(xy, xy2):
    """Euclidean distance between two 2-D points."""
    dx = xy2[0] - xy[0]
    dy = xy2[1] - xy[1]
    return math.sqrt(dx * dx + dy * dy)
glibvision/glandmark_utils.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+
4
#simple single version
def bbox_to_glandmarks(file_name,bbox,points = None):
    """Build a g-landmark dict for one image from a bbox and optional points.

    The stem of file_name must be numeric — it becomes the "id" field.
    Coordinates are truncated to int. When points is given, each (x, y)
    pair is attached under the box's "parts" key.
    """
    stem, _ = os.path.splitext(file_name)
    box = {
        "left": int(bbox[0]), "top": int(bbox[1]), "width": int(bbox[2]), "height": int(bbox[3])
    }
    if points is not None:
        box["parts"] = [{"x": int(p[0]), "y": int(p[1])} for p in points]
    return {"image": {
        "boxes": [box],
        "file": file_name,
        "id": int(stem)
        # width,height ignore here
    }}
22
+
23
#technically this is not g-landmark/dlib ,
def convert_to_landmark_group_json(points):
    """Group 68 face-landmark points into named facial regions.

    Returns a one-element list (layout allows future multi-person use),
    or None (with a printed message) when the input is not 68 points.
    """
    if len(points) != 68:
        print(f"points must be 68 but {len(points)}")
        return None
    pts = list(points)
    group = {  # index starts at 0; landmark numbering conventionally starts at 1
        "chin": pts[0:17],
        "left_eyebrow": pts[17:22],
        "right_eyebrow": pts[22:27],
        "nose_bridge": pts[27:31],
        "nose_tip": pts[31:36],
        "left_eye": pts[36:42],
        "right_eye": pts[42:48],
        # lip points customized structure
        # MIT licensed face_recognition
        # https://github.com/ageitgey/face_recognition
        "top_lip": pts[48:55] + [pts[64], pts[63], pts[62], pts[61], pts[60]],
        "bottom_lip": pts[54:60] + [pts[48], pts[60], pts[67], pts[66], pts[65], pts[64]],
    }
    return [group]
glibvision/numpy_utils.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ #2024-12-03 rotate_point_euler
4
+ #2024-12-04 load_data
5
def load_data(filepath):
    """Load comma-separated numeric text into a NumPy array.

    Args:
        filepath: path of the data file.

    Returns:
        np.ndarray on success; None when the file is missing or malformed
        (an error message is printed).
    """
    try:
        return np.loadtxt(filepath, delimiter=",")
    except (FileNotFoundError, ValueError) as e:
        print(f"Error loading data: {e}")
        return None
21
def rotate_point_euler(point, angles,order="xyz"):
    """Rotate a 3-D point by per-axis Euler angles.

    Args:
        point: 3-D point (x, y, z).
        angles: rotation angles (rx, ry, rz) in radians.
        order: axis application order; unrecognized strings fall back to "zyx".

    Returns:
        np.ndarray: the rotated point.
    """
    rx, ry, rz = angles
    vec = np.array(point)

    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)

    # Elementary rotation matrices about each axis.
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])

    compositions = {
        "xyz": (Rx, Ry, Rz),
        "xzy": (Rx, Rz, Ry),
        "yxz": (Ry, Rx, Rz),
        "yzx": (Ry, Rz, Rx),
        "zxy": (Rz, Rx, Ry),
        "zyx": (Rz, Ry, Rx),
    }
    first, second, third = compositions.get(order.lower(), (Rz, Ry, Rx))
    return first @ second @ third @ vec
78
+
79
def apply_binary_mask_to_color(base_image,color,mask):
    """Paint color into base_image wherever the mask is exactly 255.

    Args:
        base_image (np.ndarray): image modified in place.
        color: value/color written at masked pixels.
        mask (np.ndarray): binary mask; 2-D, or 3-D (first channel used).

    Returns:
        np.ndarray: base_image (the same object, mutated).
    """
    # TODO check all shape
    mask_plane = mask if mask.ndim == 2 else mask[:, :, 0]
    base_image[mask_plane == 255] = color
    return base_image
103
+
104
def apply_binary_mask_to_image(base_image,paste_image,mask):
    """Copy paste_image pixels into base_image wherever the mask is exactly 255.

    Args:
        base_image (np.ndarray): destination image, modified in place.
        paste_image (np.ndarray): source image.
        mask (np.ndarray): binary mask; 2-D, or 3-D (first channel used).

    Returns:
        np.ndarray: base_image (the same object, mutated).
    """
    # TODO check all shape
    mask_plane = mask if mask.ndim == 2 else mask[:, :, 0]
    selected = mask_plane == 255
    base_image[selected] = paste_image[selected]
    return base_image
128
+
129
def pil_to_numpy(image):
    """Convert a PIL image (or any array-like) to a uint8 NumPy array (always copies)."""
    return np.array(image, dtype=np.uint8)
131
+
132
def extruce_points(points,index,ratio=1.5):
    """Push the point at `index` away from the point cloud's centroid by `ratio`.

    Fixes over the original: the bound check was `index > len(points)`,
    an off-by-one that let index == len(points) fall through to an
    IndexError instead of the intended ValueError; also removed a
    leftover debug print of the centroid.

    Args:
        points (np.ndarray): point cloud.
        index (int): index of the point to move outward.
        ratio (float): scale applied to the centroid->point vector.

    Returns:
        np.ndarray: the displaced point.

    Raises:
        ValueError: when index is out of range.
    """
    center_point = np.mean(points, axis=0)
    if index < 0 or index >= len(points):
        raise ValueError(f"index must be range(0,{len(points)} but value = {index})")
    vec_to_center = points[index] - center_point
    return vec_to_center * ratio + center_point
143
+
144
+
145
def bulge_polygon(points, bulge_factor=0.1,isClosed=True):
    """Insert an outward-pushed midpoint on each polygon edge.

    Each edge midpoint is displaced away from the polygon centroid by
    bulge_factor. Note the result is an ndarray, with the inserted
    midpoints cast to int32. When isClosed is False, the edge that would
    close the polygon is skipped.
    """
    pts = np.array(points)
    centroid = np.mean(pts, axis=0)
    bulged = []
    count = len(pts)
    for i, vertex in enumerate(pts):
        if not isClosed and i == count - 1:
            break
        # Midpoint of the edge from this vertex to the next (wrapping).
        midpoint = vertex + (pts[(i + 1) % count] - vertex) / 2
        # Push the midpoint along the centroid->midpoint direction.
        pushed = midpoint + (midpoint - centroid) * bulge_factor
        bulged.append(vertex)
        bulged.append(pushed.astype(np.int32))
    return np.array(bulged)
180
+
181
+
182
# image.shape rgb are (1024,1024,3) use 1024,1024 as 2-dimensional
def create_2d_image(shape):
    """Create a black uint8 grayscale image from an (h, w[, c]) shape."""
    height, width = shape[:2]
    return np.zeros((height, width), dtype=np.uint8)
glibvision/pil_utils.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image,ImageDraw
2
+ from .draw_utils import box_to_xy,to_int_points,box_to_point
3
+ #ver-2024-11-18
4
def create_color_image(width, height, color=(255,255,255)):
    """Create a solid-color PIL image.

    Fixes over the original: `color == None` replaced with an identity
    check, and a color with neither 3 nor 4 components now raises a
    clear ValueError instead of an UnboundLocalError on `mode`.

    Args:
        width, height: image size in pixels.
        color: RGB 3-tuple or RGBA 4-tuple; None means black RGB.

    Returns:
        PIL.Image.Image: the new image.

    Raises:
        ValueError: when color has neither 3 nor 4 components.
    """
    if color is None:
        color = (0, 0, 0)

    if len(color) == 3:
        mode = "RGB"
    elif len(color) == 4:
        mode = "RGBA"
    else:
        raise ValueError(f"color must have 3 (RGB) or 4 (RGBA) components, got {len(color)}")

    return Image.new(mode, (width, height), color)
15
+
16
# deprecated
def fill_points(image,points,color=(255,255,255)):
    # Thin backward-compatibility wrapper; prefer draw_points(image, points, fill=color).
    return draw_points(image,points,fill=color)
19
+
20
def draw_points(image,points,outline=None,fill=None,width=1):
    """Draw a polygon over the image in place and return the same image.

    outline and fill may each be None; when both are None nothing is drawn.
    Point coordinates are truncated to ints.
    """
    painter = ImageDraw.Draw(image)
    coords = [(int(px), int(py)) for px, py in points]
    if not (outline is None and fill is None):
        painter.polygon(coords, outline=outline, fill=fill, width=width)
    return image
29
+
30
def draw_box(image,box,outline=None,fill=None):
    """Draw an (x, y, w, h) box on a PIL image via its four integer corners."""
    points = to_int_points(box_to_point(box))
    return draw_points(image,points,outline,fill)
33
+
34
def from_numpy(numpy_array):
    """Wrap a NumPy array as a PIL image."""
    return Image.fromarray(numpy_array)
glibvision/yolox_utils.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+ import json
3
+ import os
4
+
5
# Absolute path of the external YOLOX face-detector script invoked by detect_yolox_face.
# NOTE(review): hard-coded Windows user path — breaks on any other machine; consider an env var or config.
get_face_path = os.path.join("C:\\Users\\owner\\Documents\\pythons\\yolox-examples\\get_face_rect.py")
6
+
7
def detect_yolox_face(file_path):
    """Run the external YOLOX face-rect script on an image and return its parsed JSON output.

    Args:
        file_path: image path passed to the detector via its -i flag.

    Returns:
        Parsed JSON from the script's stdout — presumably [x, y, w, h]
        per the commented debug line below; verify against get_face_rect.py.

    Note:
        No error handling: a non-zero exit status or non-JSON stdout makes
        json.loads raise here.
    """
    command = ["python",get_face_path,"-i",file_path]
    result=subprocess.run(command,capture_output=True, text=True)

    json_data = json.loads(result.stdout)
    #print(f"x {json_data[0]},y {json_data[1]},w {json_data[2]},h {json_data[3]}, ")
    return json_data