Update app.py
Browse files
app.py
CHANGED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

# BUG FIX: the rest of the file calls ``st.write`` — the original
# ``import streamlit`` (no alias) never bound the name ``st``.
import streamlit as st

import langchain
from langchain.llms import GenerativeModel

# SECURITY FIX: the original hard-coded a live API key in source control.
# That key is leaked and must be revoked; read the key from the
# environment instead.
API_KEY = os.environ.get("GEMINI_API_KEY", "")

# Configure the model.
# NOTE(review): "google-llm/text-davinci-003" looks like an OpenAI model
# identifier, while the docstrings below talk about Gemini — confirm the
# correct model name for this client library.
llm = GenerativeModel("google-llm/text-davinci-003", api_key=API_KEY)
+
def generate_response(user_input):
    """Send *user_input* to the configured LLM and return its reply text.

    Args:
        user_input: The user's message as a plain string.

    Returns:
        The generated response text from the model.
    """
    # BUG FIX: the original wrote ``prompt ={user_input}``, which builds a
    # one-element *set*, not a string — pass the text straight through.
    prompt = user_input
    response = llm.generate_content(prompt=prompt)
    # NOTE(review): assumes ``response.content`` is a list of
    # ``{"text": ...}`` dicts — confirm against the client library's
    # actual return shape.
    return response.content[0]["text"]
25 |
+
# Simple console REPL: read a line, send it to the model, show the reply.
# NOTE(review): mixing a blocking ``input()`` loop with Streamlit is
# unusual — a real Streamlit app would use ``st.text_input`` and rerun
# semantics instead of a ``while`` loop.
if __name__ == "__main__":  # don't block on input() when imported
    while True:
        user_input = input("enter your text")
        if user_input.lower() == "quit":
            break
        response = generate_response(user_input)
        # BUG FIX: the original passed ``{response}`` (a one-element set),
        # which would render as ``{'...'}`` — write the string itself.
        st.write(response)