Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,34 +1,52 @@
|
|
1 |
|
2 |
import os
|
3 |
-
|
4 |
import subprocess
|
5 |
-
|
6 |
import time
|
7 |
-
|
8 |
import gradio as gr
|
9 |
-
|
10 |
import numpy as np
|
11 |
-
|
12 |
import tensorflow as tf
|
13 |
-
|
14 |
from tensorflow.keras.preprocessing.image import load_img, img_to_array
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
# Start the Ollama server as a background process.
# subprocess.Popen is already non-blocking, so the original
# "nohup ollama serve &" shell string (run with shell=True, which is both
# unnecessary here and an injection-prone pattern) is replaced with the
# safer list form. `command` and `process` keep their original names in
# case later code references them.
command = ["ollama", "serve"]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
30 |
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
|
33 |
f = h5py.File("best_model_2.h5", mode="r+")
|
34 |
model_config_string = f.attrs.get("model_config")
|
|
|
1 |
|
2 |
import os
|
|
|
3 |
import subprocess
|
|
|
4 |
import time
|
|
|
5 |
import gradio as gr
|
|
|
6 |
import numpy as np
|
|
|
7 |
import tensorflow as tf
|
|
|
8 |
from tensorflow.keras.preprocessing.image import load_img, img_to_array
|
9 |
+
import h5py
|
10 |
+
import google.generativeai as genai
|
# Configure the Gemini client from the environment.
# SECURITY: the original line hard-coded a Google API key in source; a key
# committed like this is leaked and must be revoked/rotated. Supply the key
# via the GEMINI_API_KEY environment variable (e.g. a Spaces secret) instead.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))
|
13 |
+
|
# Decoding parameters passed to every Gemini generation request.
generation_config = dict(
    temperature=1,        # full-temperature sampling
    top_p=0.95,           # nucleus sampling cutoff
    top_k=0,              # 0 disables top-k filtering
    max_output_tokens=8192,
)
|
21 |
+
|
# Block medium-and-above content in each harm category the API screens for.
_BLOCKED_CATEGORIES = (
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
)
safety_settings = [
    {"category": category, "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
    for category in _BLOCKED_CATEGORIES
]
|
29 |
|
# System prompt: fixes the model's persona as a breast-cancer assistant.
system_instruction = " ".join([
    "You are Dr. Bot, a medical assistant specializing in breast cancer.",
    "Your role is to provide accurate information about breast cancer types, symptoms,",
    "screening guidelines, treatment options, and supportive resources.",
    "Offer compassionate support and respond to patient inquiries with empathy and evidence-based information.",
])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
# Instantiate the Gemini chat model with the generation, safety and persona
# settings defined above.
_model_settings = {
    "generation_config": generation_config,
    "system_instruction": system_instruction,
    "safety_settings": safety_settings,
}
model = genai.GenerativeModel(model_name="gemini-1.5-pro-latest", **_model_settings)
|
# Function to interact with the Gemini model
def query_model(input_text):
    """Send one user message to the Gemini model and return the reply text.

    Bug fix: the original implementation placed ``input_text`` into the chat
    *history* and then sent the literal placeholder string
    ``"YOUR_USER_INPUT"``, so the model never saw the real question.

    Parameters
    ----------
    input_text : str
        The user's message.

    Returns
    -------
    str
        The model's generated response text.
    """
    convo = model.start_chat(history=[])
    response = convo.send_message(input_text)
    return response.text
|
50 |
|
# Open the saved Keras model (HDF5) and read its serialized architecture
# from the file-level attributes.
# NOTE(review): mode "r+" suggests the config is patched and written back
# later in the file (not visible here) — confirm before changing the mode.
f = h5py.File("best_model_2.h5", "r+")
model_config_string = f.attrs.get("model_config")
|