Update app.py
app.py
CHANGED
@@ -1,49 +1,216 @@
- (the 49 lines of the previous app.py were removed; their contents are not preserved in this view)
# Install Ollama. The committed first line was the notebook command
# `!curl https://ollama.ai/install.sh | sh`, which is not valid Python in a
# plain script; it is run through the shell here instead.
import os
import subprocess
import time

import gradio as gr
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, img_to_array

subprocess.run("curl https://ollama.ai/install.sh | sh", shell=True, check=False)

# Set the environment variable for the Ollama model
OLLAMA_MODEL = 'dolphin-mistral:v2.8'
os.environ['OLLAMA_MODEL'] = OLLAMA_MODEL

# Start Ollama as a background process
command = "nohup ollama serve &"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(5)  # Wait for the server to start
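A fixed five-second sleep can be fragile on a cold start. One option is to poll the local server until it answers before accepting requests; the sketch below is not part of the commit, assumes Ollama's default port 11434, and uses only the standard library.

import urllib.request

def wait_for_ollama(url="http://127.0.0.1:11434", timeout=60):
    # Poll the Ollama server until it responds or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=2):
                return True  # server answered, it is ready
        except OSError:
            time.sleep(1)  # not up yet, try again shortly
    return False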
# Function to interact with the Ollama model
def query_model(input_text):
    response = subprocess.run(
        ["ollama", "run", OLLAMA_MODEL, input_text],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True
    )
    return response.stdout if response.returncode == 0 else response.stderr
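Shelling out to "ollama run" for every question works, but Ollama also exposes a local HTTP API. A minimal alternative sketch, assuming the server listens on the default port 11434 and that the requests package is installed (it is not imported by this app):

import requests

def query_model_http(input_text, model=OLLAMA_MODEL):
    # Send a single, non-streaming generate request to the local Ollama server.
    resp = requests.post(
        "http://127.0.0.1:11434/api/generate",
        json={"model": model, "prompt": input_text, "stream": False},
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json().get("response", "")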
# Load the breast cancer detection model
incept_model = tf.keras.models.load_model('/content/sample_data/breast cancer/best_model_2.h5')
fixed_image_url = "/content/sample_data/breast cancer/static/breast-cancer-awareness-month-1200x834.jpg"
# Example images and their descriptions
examples = [
    ["/content/sample_data/breast cancer/malignant.png", "Ultrasound image showing a malignant finding."],
    ["/content/sample_data/breast cancer/normal.png", "Ultrasound image with a normal result."],
    ["/content/sample_data/breast cancer/benign.png", "Ultrasound image showing a benign finding."]
]

IMAGE_SHAPE = (224, 224)
classes = ['benign', 'malignant', 'normal']

# Function to prepare the image for prediction
def prepare_image(file):
    img = load_img(file, target_size=IMAGE_SHAPE)
    img_array = img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    return tf.keras.applications.efficientnet.preprocess_input(img_array)
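A quick sanity check that the preprocessing matches what the loaded model expects; sample_scan.png is a hypothetical local file used only for illustration.

batch = prepare_image("sample_scan.png")  # hypothetical test image
print(batch.shape)                        # expected: (1, 224, 224, 3)
print(incept_model.input_shape)           # should agree with the batch shape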
# Prediction function for breast cancer detection
def predict(file):
    if file is None:
        return "Please upload an image.", fixed_image_url

    img = prepare_image(file)
    res = incept_model.predict(img)
    pred_index = np.argmax(res)
    pred = classes[pred_index]

    # Specific advice for each prediction
    if pred == 'malignant':
        advice = "As a healthcare professional, I recommend immediate further evaluation. Malignant findings can indicate the presence of cancer. Please consult a specialist."
    elif pred == 'benign':
        advice = "The results show benign characteristics, which is a positive outcome. This means there are no cancerous cells. However, it’s essential to have regular follow-ups with your healthcare provider to ensure that there are no changes over time."
    else:  # pred == 'normal'
        advice = "The results appear normal. Continue with regular check-ups and maintain a healthy lifestyle."

    return advice, fixed_image_url
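If the app should also surface how confident the model is, a small variant of predict could return per-class scores. This is only a sketch, not part of the commit; it assumes the model outputs one score per entry in classes, in the same order.

def predict_with_scores(file):
    # Map each class name to the model's score for the uploaded image.
    scores = incept_model.predict(prepare_image(file))[0]
    return {name: float(score) for name, score in zip(classes, scores)}

A gr.Label output component could render such a dictionary directly if this were wired into the interface.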
# Function to provide project information
def show_info():
    return (
        "<h3 style='text-align: center;'> 🎗️ Welcome to Our Breast Cancer Detection System 🎗️ </h3>\n\n"
        "Breast cancer is one of the most common causes of death among women worldwide.\n\n"
        "Early detection plays a crucial role in reducing mortality rates.\n\n"
        "This project includes two main components:\n\n"
        "- **Ultrasound Image Classification**: \n\n We classify breast ultrasound images into three categories: normal, benign, and malignant. \n\n"
        "The dataset consists of 780 ultrasound images collected in 2018 from 600 female patients, aged 25 to 75. \n\n"
        "Each image is in PNG format with an average size of 500x500 pixels.\n\n\n"
        "- **Breast Cancer Information Chatbot**: \n\n Our chatbot is designed to provide reliable information and answer questions about breast cancer, helping users understand the disease better.\n\n\n"
        "For additional assistance, you can interact with our chatbot or upload images for classification."
    )
# Create the Gradio interface for both functionalities
chatbot_interface = gr.Interface(
    fn=query_model,
    inputs=gr.Textbox(label="Enter your question about breast cancer:", placeholder="e.g., What are the symptoms of breast cancer?", lines=2),
    outputs=gr.Textbox(label="Response:", placeholder="Your answer will appear here..."),
    title="Breast Cancer Chatbot 🎗️",
    description="Ask your questions related to breast cancer. Our chatbot provides information and guidance based on your inquiries.",
)

breast_cancer_interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="filepath", label="Upload an Image"),
    outputs=[
        gr.Textbox(label="Prediction"),
        gr.Image(label="Your Partner in Breast Health Awareness 🎗️", value=fixed_image_url)
    ],
    title="Breast Cancer Detection",
    description="Predicting Your Breast Health: Is it Benign, Malignant, or Normal?",
    examples=examples,
)
# Create the information display as a separate Markdown element
info_markdown = gr.Markdown(show_info())

# Combine interfaces into a themed Blocks app
with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink, secondary_hue=gr.themes.colors.neutral)) as demo:
    combined_interface = gr.TabbedInterface(
        [info_markdown, chatbot_interface, breast_cancer_interface],
        ["Project Information", "Breast Cancer Chatbot", "Breast Cancer Detection"]
    )

# Launch the combined interface
if __name__ == "__main__":
    demo.launch(debug=True)
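On Hugging Face Spaces the app is served automatically once demo.launch() runs. When running outside Spaces (for example in a local container), the standard launch() parameters can bind the server explicitly; the host and port below are assumptions matching Gradio's usual defaults, not values from the original commit.

# Alternative to the call above, for a non-Spaces run:
# demo.launch(debug=True, server_name="0.0.0.0", server_port=7860)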