SalehAhmad committed
Commit ee83b99
1 Parent(s): 8507802
Files changed (4)
  1. .gitignore +3 -1
  2. app.py +52 -27
  3. calculations.py +71 -17
  4. sizing.py +58 -27
.gitignore CHANGED
@@ -1 +1,3 @@
-env/
+env/
+ENV/
+__pycache__/
app.py CHANGED
@@ -1,50 +1,75 @@
-import gradio as gr
-import pandas as pd
 import os
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 
 import tensorflow as tf
+import tf_bodypix
 from tf_bodypix.api import download_model, load_model, BodyPixModelPaths
-from tf_bodypix.draw import draw_poses # utility function using OpenCV
+from tf_bodypix.draw import draw_poses
 from tensorflow.keras import preprocessing
 import cv2
 import json
+from matplotlib import pyplot as plt
 import numpy as np
 from calculations import measure_body_sizes
+import gradio as gr
+import pandas as pd
 
 # Load BodyPix model
 bodypix_model = load_model(download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16))
 
-def process_images(front_img, side_img, height):
-    # Convert images to image arrays
-    front_image_array = preprocessing.image.img_to_array(front_img)
-    side_image_array = preprocessing.image.img_to_array(side_img)
+rainbow = [
+    [110, 64, 170], [143, 61, 178], [178, 60, 178], [210, 62, 167],
+    [238, 67, 149], [255, 78, 125], [255, 94, 99], [255, 115, 75],
+    [255, 140, 56], [239, 167, 47], [217, 194, 49], [194, 219, 64],
+    [175, 240, 91], [135, 245, 87], [96, 247, 96], [64, 243, 115],
+    [40, 234, 141], [28, 219, 169], [26, 199, 194], [33, 176, 213],
+    [47, 150, 224], [65, 125, 224], [84, 101, 214], [99, 81, 195]
+]
+
+def process_images(front_img, side_img, real_height_cm):
+    fimage_array = preprocessing.image.img_to_array(front_img)
+    simage_array = preprocessing.image.img_to_array(side_img)
 
-    # BodyPix prediction
-    result = bodypix_model.predict_single(front_image_array)
-    mask = result.get_mask(threshold=0.75)
-    # colored_mask = result.get_colored_part_mask(mask)
+    # bodypix prediction
+    frontresult = bodypix_model.predict_single(fimage_array)
+    sideresult = bodypix_model.predict_single(simage_array)
+
+    front_mask = frontresult.get_mask(threshold=0.75)
+    side_mask = sideresult.get_mask(threshold=0.75)
+
+    # preprocessing.image.save_img(f'{output_path}/frontbodypix-mask.jpg',front_mask)
+    # preprocessing.image.save_img(f'{output_path}/sidebodypix-mask.jpg',side_mask)
 
-    poses = result.get_poses()
-    print(f'shape of poses: {np.shape(poses)}')
-    print(poses)
-    # image_with_poses = draw_poses(
-    #     front_image_array.copy(), # create a copy to ensure we are not modifying the source image
-    #     poses,
-    #     keypoints_color=(255, 100, 100),
-    #     skeleton_color=(100, 100, 255)
-    # )
+    front_colored_mask = frontresult.get_colored_part_mask(front_mask, rainbow)
+    side_colored_mask = sideresult.get_colored_part_mask(side_mask, rainbow)
 
-    # Measure body sizes using poses and real height
-    body_sizes = measure_body_sizes(poses, height)
-    print(f'Body sizes: {body_sizes}')
+    # preprocessing.image.save_img(f'{output_path}/frontbodypix-colored-mask.jpg',front_colored_mask)
+    # preprocessing.image.save_img(f'{output_path}/sidebodypix-colored-mask.jpg',side_colored_mask)
 
-    # Prepare the output images
-    # front_image_with_poses = preprocessing.image.array_to_img(image_with_poses)
+    frontposes = frontresult.get_poses()
+    front_image_with_poses = draw_poses(
+        fimage_array.copy(), # create a copy to ensure we are not modifing the source image
+        frontposes,
+        keypoints_color=(255, 100, 100),
+        skeleton_color=(100, 100, 255)
+    )
 
-    # Convert measurements to DataFrame for display
-    measurements_df = pd.DataFrame(body_sizes)
+    sideposes = sideresult.get_poses()
+    side_image_with_poses = draw_poses(
+        simage_array.copy(), # create a copy to ensure we are not modifing the source image
+        sideposes,
+        keypoints_color=(255, 100, 100),
+        skeleton_color=(100, 100, 255)
+    )
+    # print(np.array(simage).shape)
+    # print(np.array(side_colored_mask).shape)
 
+    # preprocessing.image.save_img(f'{output_path}/frontbodypix-poses.jpg', front_image_with_poses)
+    # preprocessing.image.save_img(f'{output_path}/sidebodypix-poses.jpg', side_image_with_poses)
+
+    body_sizes = measure_body_sizes(side_colored_mask, front_colored_mask, sideposes, frontposes, real_height_cm, rainbow)
+    measurements_df = pd.DataFrame([body_sizes[0]])
     return measurements_df
 
 # Create the Gradio interface
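Note: the hunk ends at the `# Create the Gradio interface` comment because the interface wiring below it is unchanged by this commit. For readers following the new `process_images(front_img, side_img, real_height_cm)` signature, here is a minimal sketch of how it could be exposed through Gradio; the component choices, labels, and default height are illustrative assumptions, not code from this repository.

import gradio as gr

# Hypothetical wiring -- not part of this commit's diff.
demo = gr.Interface(
    fn=process_images,                                     # defined above in app.py
    inputs=[
        gr.Image(type="pil", label="Front image"),         # front_img
        gr.Image(type="pil", label="Side image"),          # side_img
        gr.Number(value=170, label="Real height (cm)"),    # real_height_cm
    ],
    outputs=gr.Dataframe(label="Estimated measurements"),  # the returned DataFrame
)

if __name__ == "__main__":
    demo.launch()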
calculations.py CHANGED
@@ -1,4 +1,8 @@
 import math
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+from tensorflow.keras import preprocessing
 
 def euclidean_distance(point1, point2):
     return math.sqrt((point2[0] - point1[0]) ** 2 + (point2[1] - point1[1]) ** 2)
@@ -7,39 +11,89 @@ def convert_to_real_measurements(pixel_measurement, pixel_height, real_height_cm
     height_ratio = real_height_cm / pixel_height
     return pixel_measurement * height_ratio
 
-def measure_body_sizes(poses, real_height_cm):
+def measure_body_sizes(side_colored_mask, front_colored_mask, sideposes, frontposes, real_height_cm, rainbow):
     """Measure various body sizes based on detected poses."""
     measurements = []
-    for pose in poses:
+
+    for pose in frontposes:
+        # Assuming each `pose` is a dictionary with 'keypoints' that are already in the required format
         keypoints = pose[0] # This should directly give us the dictionary
 
         # Extract positions directly from keypoints
+        left_eye = keypoints[1].position
+        right_eye = keypoints[2].position
+        nose = keypoints[3].position
+        right_ear = keypoints[4].position
         left_shoulder = keypoints[5].position
         right_shoulder = keypoints[6].position
+        left_elbow = keypoints[7].position
+        right_elbow = keypoints[8].position
+        left_wrist = keypoints[9].position
+        right_wrist = keypoints[10].position
         left_hip = keypoints[11].position
         right_hip = keypoints[12].position
+        left_knee = keypoints[13].position
+        right_knee = keypoints[14].position
        left_ankle = keypoints[15].position
        right_ankle = keypoints[16].position
-        left_wrist = keypoints[9].position
-        right_wrist = keypoints[10].position
-        left_eye = keypoints[1].position
-        right_eye = keypoints[2].position
 
         # Calculate pixel height (from the top of the head to the bottom of the ankle)
         pixel_height = euclidean_distance((left_eye.x, left_eye.y), (left_ankle.x, left_ankle.y))
+
+        shoulder_width_cm = convert_to_real_measurements(
+            euclidean_distance((left_shoulder.x, left_shoulder.y),(right_shoulder.x, right_shoulder.y)),
+            pixel_height, real_height_cm
+        )
+
+        # arm_length_cm = convert_to_real_measurements(
+        #     euclidean_distance((right_shoulder.x, right_shoulder.y), (right_elbow.x, right_elbow.y)),
+        #     pixel_height, real_height_cm
+        # ) + convert_to_real_measurements(
+        #     euclidean_distance((right_elbow.x, right_elbow.y), (right_wrist.x, right_wrist.y)),
+        #     pixel_height, real_height_cm
+        # )
+
+        # leg_length_cm = convert_to_real_measurements(
+        #     euclidean_distance((left_hip.x, left_hip.y), (left_knee.x, left_knee.y)),
+        #     pixel_height, real_height_cm
+        # ) + convert_to_real_measurements(
+        #     euclidean_distance((left_knee.x, left_knee.y), (left_ankle.x, left_ankle.y)),
+        #     pixel_height, real_height_cm
+        # )
+
+        arm_length_cm = convert_to_real_measurements(
+            euclidean_distance((left_shoulder.x, left_shoulder.y), (left_wrist.x, left_wrist.y)),
+            pixel_height, real_height_cm
+        )
+
+        leg_length_cm = convert_to_real_measurements(
+            euclidean_distance((left_hip.x, left_hip.y), (left_ankle.x, right_ankle.y)),
+            pixel_height, real_height_cm
+        )
+
+        shoulder_to_waist_cm = convert_to_real_measurements(
+            euclidean_distance((left_shoulder.x, left_shoulder.y), (left_hip.x, left_hip.y)),
+            pixel_height, real_height_cm
+        )
+
+        # Calculate waist circumference using the ellipse circumference formula
+        a = euclidean_distance((left_hip.x, left_hip.y), (right_hip.x, right_hip.y)) / 2
+        # b = euclidean_distance((), ()) / 2
+
+        # Use Ramanujan's approximation for the circumference of an ellipse
+        # waist_circumference_px = math.pi * (3*(a + b) - math.sqrt((3*a + b)*(a + 3*b)))
+        waist_circumference_cm = 90 #convert_to_real_measurements(waist_circumference_px, pixel_height, real_height_cm)
 
-        # Calculate other pixel measurements
-        shoulder_width_pixels = euclidean_distance((left_shoulder.x, left_shoulder.y), (right_shoulder.x, right_shoulder.y))
-        leg_length_pixels = euclidean_distance((left_hip.x, left_hip.y), (left_ankle.x, left_ankle.y))
-        arm_length_pixels = euclidean_distance((left_shoulder.x, left_shoulder.y), (left_wrist.x, left_wrist.y))
-        shoulder_to_waist_pixels = euclidean_distance((left_shoulder.x, left_shoulder.y), (left_hip.x, left_hip.y))
-
         # Convert pixel measurements to real measurements using the height ratio
         measurements.append({
-            "shoulder_width_cm": convert_to_real_measurements(shoulder_width_pixels, pixel_height, real_height_cm),
-            "leg_length_cm": convert_to_real_measurements(leg_length_pixels, pixel_height, real_height_cm),
-            "arm_length_cm": convert_to_real_measurements(arm_length_pixels, pixel_height, real_height_cm),
-            "shoulder_to_waist_cm": convert_to_real_measurements(shoulder_to_waist_pixels, pixel_height, real_height_cm)
+            "shoulder_width_cm": shoulder_width_cm,
+            "leg_length_cm": leg_length_cm,
+            "arm_length_cm": arm_length_cm,
+            "shoulder_to_waist_cm": shoulder_to_waist_cm,
+            "height_cm": real_height_cm,
+            "waist_circumference_cm": waist_circumference_cm
         })
 
-    return measurements
+    return measurements
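Note: waist circumference is still a placeholder (hardcoded to 90) because the second semi-axis of the ellipse is never derived from the side view. For reference, here is a minimal sketch of the Ramanujan approximation the commented-out lines refer to, assuming a hypothetical side-view waist depth supplies `b`; this helper is illustrative and not part of the commit.

import math

def ellipse_circumference(a, b):
    # Ramanujan's approximation: C ~= pi * (3*(a + b) - sqrt((3a + b)*(a + 3b)))
    return math.pi * (3 * (a + b) - math.sqrt((3 * a + b) * (a + 3 * b)))

# a = half the front-view hip width in pixels (computed above as `a`)
# b = half a side-view waist depth in pixels (hypothetical; not extracted in this commit)
# waist_circumference_cm = convert_to_real_measurements(
#     ellipse_circumference(a, b), pixel_height, real_height_cm)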
sizing.py CHANGED
@@ -1,12 +1,11 @@
 import os
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
-
-import torch
-torch.set_default_device('cuda:0')
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 
 import tensorflow as tf
+import tf_bodypix
 from tf_bodypix.api import download_model, load_model, BodyPixModelPaths
-from tf_bodypix.draw import draw_poses # utility function using OpenCV
+from tf_bodypix.draw import draw_poses
 from tensorflow.keras import preprocessing
 import cv2
 import json
@@ -16,47 +15,79 @@ from calculations import measure_body_sizes
 
 bodypix_model = load_model(download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16))
 
-input_path = 'input1/files/6'
-testfile = 'front_img.jpg'
+input_path = 'input1/files/20'
+front_image = 'front_img.jpg'
+side_image = 'side_img.jpg'
 output_path = 'output'
+real_height_cm = 173.0 # Replace with the real height in cm
+
+rainbow = [
+    [110, 64, 170], [143, 61, 178], [178, 60, 178], [210, 62, 167],
+    [238, 67, 149], [255, 78, 125], [255, 94, 99], [255, 115, 75],
+    [255, 140, 56], [239, 167, 47], [217, 194, 49], [194, 219, 64],
+    [175, 240, 91], [135, 245, 87], [96, 247, 96], [64, 243, 115],
+    [40, 234, 141], [28, 219, 169], [26, 199, 194], [33, 176, 213],
+    [47, 150, 224], [65, 125, 224], [84, 101, 214], [99, 81, 195]
+]
 
-image = preprocessing.image.load_img(input_path+'/'+testfile)
+fimage = preprocessing.image.load_img(input_path+'/'+front_image)
+simage = preprocessing.image.load_img(input_path+'/'+side_image)
 
 # image converted to image array
-image_array = preprocessing.image.img_to_array(image)
+fimage_array = preprocessing.image.img_to_array(fimage)
+simage_array = preprocessing.image.img_to_array(simage)
 
 # bodypix prediction
-result = bodypix_model.predict_single(image_array)
-mask = result.get_mask(threshold=0.75)
-preprocessing.image.save_img(f'{output_path}/bodypix-mask.jpg',mask)
+frontresult = bodypix_model.predict_single(fimage_array)
+sideresult = bodypix_model.predict_single(simage_array)
+
+front_mask = frontresult.get_mask(threshold=0.75)
+side_mask = sideresult.get_mask(threshold=0.75)
 
-colored_mask = result.get_colored_part_mask(mask)
-print(np.array(image).shape)
+preprocessing.image.save_img(f'{output_path}/frontbodypix-mask.jpg',front_mask)
+preprocessing.image.save_img(f'{output_path}/sidebodypix-mask.jpg',side_mask)
 
-print(colored_mask.shape)
-preprocessing.image.save_img(f'{output_path}/bodypix-colored-mask.jpg',colored_mask)
+front_colored_mask = frontresult.get_colored_part_mask(front_mask, rainbow)
+side_colored_mask = sideresult.get_colored_part_mask(side_mask, rainbow)
 
-poses = result.get_poses()
-image_with_poses = draw_poses(
-    image_array.copy(), # create a copy to ensure we are not modifing the source image
-    poses,
+print(front_colored_mask.shape)
+preprocessing.image.save_img(f'{output_path}/frontbodypix-colored-mask.jpg',front_colored_mask)
+preprocessing.image.save_img(f'{output_path}/sidebodypix-colored-mask.jpg',side_colored_mask)
+
+frontposes = frontresult.get_poses()
+front_image_with_poses = draw_poses(
+    fimage_array.copy(), # create a copy to ensure we are not modifing the source image
+    frontposes,
     keypoints_color=(255, 100, 100),
     skeleton_color=(100, 100, 255)
 )
-# print(poses)
-preprocessing.image.save_img(f'{output_path}/bodypix-poses.jpg', image_with_poses)
 
-real_height_cm = 155.0 # Replace with the real height in cm
-body_sizes = 3 #measure_body_sizes(poses, real_height_cm)
-print(body_sizes)
+sideposes = sideresult.get_poses()
+side_image_with_poses = draw_poses(
+    simage_array.copy(), # create a copy to ensure we are not modifing the source image
+    sideposes,
+    keypoints_color=(255, 100, 100),
+    skeleton_color=(100, 100, 255)
+)
+print(np.array(simage).shape)
+print(np.array(side_colored_mask).shape)
 
 
-# Define the file name
-file_name = './output/measurements.json'
+preprocessing.image.save_img(f'{output_path}/frontbodypix-poses.jpg', front_image_with_poses)
+preprocessing.image.save_img(f'{output_path}/sidebodypix-poses.jpg', side_image_with_poses)
+
+body_sizes = measure_body_sizes(side_colored_mask, front_colored_mask, sideposes, frontposes, real_height_cm, rainbow)
+print(body_sizes)
+
+print(np.shape(body_sizes))
+print(type(body_sizes))
+print(body_sizes[0])
+import pandas as pd
+print(pd.DataFrame([body_sizes[0]]))
+
+file_name = "output/measurements.json"
 # Open the file in write mode and save the dictionary as JSON
 with open(file_name, 'w') as json_file:
     json.dump(body_sizes, json_file, indent=4)
 
-print(f"body_sizes saved to {file_name}")
+print(f"body_sizes saved to {output_path}")