typesdigital committed on
Commit
bbb9aa4
·
verified ·
1 Parent(s): 0015894

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -19
app.py CHANGED
@@ -2,24 +2,53 @@ import gradio as gr
2
  import google.generativeai as genai
3
  from PIL import Image
4
  import os
 
5
 
6
  # Configure the Gemini API
7
  genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))  # NOTE(review): a live-looking API key was committed here as the env-var name — revoke that key immediately and load it from an environment variable
8
 
9
  # Set up the model
10
- model = genai.GenerativeModel('gemini-1.5-flash')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  def chat_with_gemini(history, user_message, image):
13
  history = history or []
14
 
15
- if image is not None:
16
- # If an image is uploaded, include it in the message
17
- response = model.generate_content([user_message, image])
18
- else:
19
- # Text-only message
20
- response = model.generate_content(user_message)
21
-
22
- history.append((user_message, response.text))
 
 
 
 
 
 
 
 
 
 
 
23
  return history, history
24
 
25
  def clear_conversation():
@@ -27,7 +56,7 @@ def clear_conversation():
27
 
28
  # Define the Gradio interface
29
  with gr.Blocks() as demo:
30
- chatbot = gr.Chatbot(label="Chat with Gemini")
31
  msg = gr.Textbox(label="Type your message here")
32
  clear = gr.Button("Clear")
33
  image_upload = gr.Image(type="pil", label="Upload an image (optional)")
@@ -37,12 +66,4 @@ with gr.Blocks() as demo:
37
 
38
  # Launch the app
39
  if __name__ == "__main__":
40
- demo.launch()
41
-
42
- # Requirements for Hugging Face Spaces
43
- # requirements.txt
44
- '''
45
- gradio==3.50.2
46
- google-generativeai==0.3.1
47
- Pillow==10.0.0
48
- '''
 
2
import google.generativeai as genai
from PIL import Image
import os
import io

# Configure the Gemini API.
# SECURITY FIX: the original code committed a real API key into the repo and
# passed it as the *name* of an environment variable, so os.environ.get(...)
# returned None (auth silently unconfigured) while the secret leaked in git
# history. That key must be revoked. The key is now read from the
# GOOGLE_API_KEY environment variable, which is the SDK's conventional name.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
9
 
10
# Decoding parameters for the chat model: default sampling temperature,
# nucleus/top-k truncation, a generous output budget, and plain-text replies.
generation_config = dict(
    temperature=1,
    top_p=0.95,
    top_k=64,
    max_output_tokens=8192,
    response_mime_type="text/plain",
)

# Shared Gemini 1.5 Flash handle used by the chat callback below.
model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
)
23
+
24
def image_to_byte_array(image: "Image") -> bytes:
    """Serialize a PIL image to its encoded byte representation.

    Parameters
    ----------
    image : PIL.Image.Image
        The image to serialize.

    Returns
    -------
    bytes
        The encoded image data.

    Notes
    -----
    Images created or transformed in memory (e.g. after Gradio preprocessing)
    have ``image.format`` set to ``None``, which would make ``Image.save``
    raise; such images are serialized as PNG instead.
    """
    buffer = io.BytesIO()
    image.save(buffer, format=image.format or "PNG")
    return buffer.getvalue()
29
 
30
def chat_with_gemini(history, user_message, image):
    """Gradio callback: send the user's message (plus optional image) to Gemini.

    Parameters
    ----------
    history : list[tuple[str, str]] | None
        Prior (user, bot) message pairs; a fresh list is used when None.
    user_message : str
        The text the user typed.
    image : PIL.Image.Image | None
        Optional uploaded image to attach to the prompt.

    Returns
    -------
    tuple
        ``(history, history)`` — the same updated list twice, matching the
        Chatbot and State outputs wired up in the Gradio interface.
    """
    history = history or []

    try:
        if image is not None:
            # Convert the image to raw encoded bytes for the API.
            image_bytes = image_to_byte_array(image)
            # FIX: report the image's real encoding instead of always claiming
            # image/jpeg; in-memory images without a recorded format are
            # serialized as PNG by image_to_byte_array, so mirror that here.
            fmt = (image.format or "PNG").lower()
            image_parts = [{"mime_type": f"image/{fmt}", "data": image_bytes}]
            prompt_parts = [user_message] + image_parts
        else:
            prompt_parts = [user_message]

        # Generate the model's reply for this turn.
        response = model.generate_content(prompt_parts)
        history.append((user_message, response.text))
    except Exception as e:
        # UI boundary: surface any API/serialization failure in the chat
        # transcript rather than crashing the Gradio app.
        error_message = f"An error occurred: {str(e)}"
        history.append((user_message, error_message))

    return history, history
53
 
54
  def clear_conversation():
 
56
 
57
  # Define the Gradio interface
58
  with gr.Blocks() as demo:
59
+ chatbot = gr.Chatbot(label="Chat with Gemini 1.5 Flash")
60
  msg = gr.Textbox(label="Type your message here")
61
  clear = gr.Button("Clear")
62
  image_upload = gr.Image(type="pil", label="Upload an image (optional)")
 
66
 
67
# Launch the app only when executed as a script (not when imported,
# e.g. by a Hugging Face Space runner that calls demo itself).
if __name__ == "__main__":
    demo.launch()