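# Streamlit app that sends the same prompt to OpenAI's ChatGPT and Google's
# Gemini API so the two responses can be compared side by side.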
import streamlit as st
import openai
import requests
# Function to call ChatGPT (OpenAI)
def query_chatgpt(prompt):
    openai.api_key = "YOUR_OPENAI_API_KEY"  # Add your OpenAI API key (do not hard-code real keys)
    # gpt-3.5-turbo is a chat model, so use the ChatCompletion endpoint with a messages list
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=150
    )
    return response.choices[0].message["content"].strip()
# Function to query Gemini API (Google's Generative Language API)
def query_gemini(prompt):
    api_key = "YOUR_GEMINI_API_KEY"  # Replace with your Gemini API key
    url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent"
    headers = {
        'Content-Type': 'application/json'
    }
    data = {
        "contents": [{
            "parts": [{"text": prompt}]
        }]
    }
    # Make the API request
    response = requests.post(url, headers=headers, params={"key": api_key}, json=data)
    # Check if the request was successful
    if response.status_code == 200:
        # The generated text is nested under candidates -> content -> parts
        try:
            return response.json()["candidates"][0]["content"]["parts"][0]["text"]
        except (KeyError, IndexError):
            return "No content returned"
    else:
        return f"Error: {response.status_code}, {response.text}"
# Streamlit layout
st.title("Compare AI Models: ChatGPT and Gemini")
prompt = st.text_area("Enter your query:")
if st.button("ChatGPT"):
    if prompt:
        chatgpt_response = query_chatgpt(prompt)
        st.subheader("ChatGPT Response:")
        st.write(chatgpt_response)
    else:
        st.warning("Please enter a prompt.")
if st.button("Gemini"):
    if prompt:
        gemini_response = query_gemini(prompt)
        st.subheader("Gemini Response:")
        st.write(gemini_response)
    else:
        st.warning("Please enter a prompt.")