Update app_dialogue.py
app_dialogue.py CHANGED (+32 -10)

@@ -68,26 +68,34 @@ from PIL import Image
 from PIL import Image
 import tempfile

+    filename = f"{uuid.uuid4()}.jpg"
+    local_path = f"{filename}"
+    image.save(local_path)
+    img_str = f""
+    return img_str
+
 def convert_to_rgb_pil(image):
     print(f"***** convert_to_rgb_pil ******")
     print(f"params: image is - {image}")
     #if image.mode == "RGB":
     #    return image
     # Save the converted image to a temporary file
-    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
-    temp_file_path = temp_file.name
-
+    #temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
+    #temp_file_path = temp_file.name
+    filename = f"{uuid.uuid4()}.jpg"
+    local_path = f"{filename}"
+
     if image.mode != "RGB":
         image_rgba = image.convert("RGBA")
         background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
         alpha_composite = Image.alpha_composite(background, image_rgba)
         alpha_composite = alpha_composite.convert("RGB")
-        alpha_composite.save(
+        alpha_composite.save(local_path)
     else:
-        image.save(
+        image.save(local_path)

     temp_file.close()
-    print(f"# Return the path to the saved image as - {
+    print(f"# Return the path to the saved image as - {local_path}")
     return temp_file_path  # Return the path to the saved image


@@ -99,6 +107,7 @@ def convert_to_rgb(filepath_or_pilimg):

     if isinstance(filepath_or_pilimg, PIL.Image.Image):
         return convert_to_rgb_pil(filepath_or_pilimg)
+
     with Image.open(filepath_or_pilimg) as image:
         # Check if the image is already in the RGB format
         if image.mode == "RGB":
@@ -117,12 +126,14 @@ def convert_to_rgb(filepath_or_pilimg):
         alpha_composite = alpha_composite.convert("RGB")

         # Save the converted image to a temporary file
-
-
-
+        filename = f"{uuid.uuid4()}.jpg"
+        local_path = f"{filename}"
+        #temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
+        #temp_file_path = temp_file.name
+        alpha_composite.save(local_path)
         temp_file.close()

+        print(f"# Return the path to the saved image as - {local_path}")
     return temp_file_path  # Return the path to the saved image

 def pil_to_markdown_im(image):
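
These three hunks replace the NamedTemporaryFile output with a uuid-named JPEG, but the unchanged lines still call temp_file.close() and return temp_file_path, names that now exist only in the commented-out version, and the five added lines just above def convert_to_rgb_pil carry function-body indentation with no enclosing def, so the bare return cannot parse at module level. A minimal, self-contained sketch of where convert_to_rgb_pil appears to be heading (it assumes import uuid is available elsewhere in app_dialogue.py):

# Sketch only: uuid-named local JPEG instead of a NamedTemporaryFile.
import uuid

from PIL import Image


def convert_to_rgb_pil(image):
    # Flatten any alpha channel onto a white background, save as a local JPEG,
    # and return the path to the saved file.
    local_path = f"{uuid.uuid4()}.jpg"
    if image.mode != "RGB":
        image_rgba = image.convert("RGBA")
        background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
        alpha_composite = Image.alpha_composite(background, image_rgba)
        alpha_composite.convert("RGB").save(local_path)
    else:
        image.save(local_path)
    return local_path

Returning local_path directly also removes the remaining dependency on the old temp_file and temp_file_path names.
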
@@ -811,6 +822,11 @@ And so, the story of Mulan and Shrek's romance came to an end, leaving a lasting
     penalty_alpha,
 ):
     # global processor, model, tokenizer
+    print("***********Model_inference*************")
+    print(f"Inside Model_inference, user_prompt_str is - {user_prompt_str} ")
+    print(f"Inside Model_inference, chat_history is - {chat_history} ")
+    print(f"Inside Model_inference, image type is - {type(image)} ")
+    print(f"Inside Model_inference, image is - {image} ")

     force_words = ""
     hide_special_tokens = False
@@ -821,6 +837,9 @@ And so, the story of Mulan and Shrek's romance came to an end, leaving a lasting
         history=chat_history,
     )

+    print(f"formated_prompt_list (or resulting_list) is {formated_prompt_list}")
+    print(f"user_prompt_list (or [current_user_prompt_str]) is {user_prompt_list}")
+
     generated_text = model_generation(
         prompt_list=formated_prompt_list,
         processor=processor,
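
The model_inference hunks above add print-based tracing of the function inputs and the formatted prompt list; a later addition in the same spirit calls int() on an f-string where print() was presumably intended, which would raise ValueError once that branch runs. If the tracing is meant to stay, a sketch of the same information routed through the standard logging module (the logger name and helper are arbitrary, not part of this commit):

# Sketch only: logging-based equivalent of the print tracing added in this commit.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("app_dialogue")


def trace_inference_inputs(user_prompt_str, chat_history, image):
    # Same information as the prints at the top of model_inference.
    logger.debug("model_inference: user_prompt_str=%r", user_prompt_str)
    logger.debug("model_inference: chat_history=%r", chat_history)
    logger.debug("model_inference: image type=%s, image=%r", type(image), image)
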
@@ -848,6 +867,7 @@ And so, the story of Mulan and Shrek's romance came to an end, leaving a lasting
         chat_history.append(
             (user_prompt_list_to_markdown(user_prompt_list), generated_text.strip("<end_of_utterance>"))
         )
+        print(f"chat_history (IF image is None or is with fake token) is -{chat_history}")
     else:
         # Case where the image is passed through the Image Box.
         # Convert the image into base64 for both passing it through the chat history and
@@ -858,9 +878,11 @@ And so, the story of Mulan and Shrek's romance came to an end, leaving a lasting
                 generated_text.strip("<end_of_utterance>"),
             )
         )
+        int(f"chat_history (ELSE IF image is available) is -{chat_history}")
     return "", None, chat_history

 def process_example(message, image):
+    print("********* process_example **********")
     clear_msg, image_value, chat = model_inference(
         user_prompt_str=message,
         chat_history=[],
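
One design note on the new save path: f"{uuid.uuid4()}.jpg" has no directory component, so the JPEGs accumulate in the process working directory and nothing in this commit deletes them. A hypothetical variant (not part of the commit) that keeps the uuid naming but writes under the system temp directory:

# Sketch only: uuid-named JPEG path under the system temp directory.
import os
import tempfile
import uuid


def temp_jpg_path():
    # The caller is still responsible for deleting the file when it is no longer needed.
    return os.path.join(tempfile.gettempdir(), f"{uuid.uuid4()}.jpg")
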