|
import os

import openai
import requests
import streamlit as st
|
|
|
|
|
def query_chatgpt(prompt):
    """Send *prompt* to OpenAI's gpt-3.5-turbo chat model and return the reply.

    Parameters
    ----------
    prompt : str
        User query, forwarded verbatim as a single user message.

    Returns
    -------
    str
        The assistant's reply text with surrounding whitespace stripped.
    """
    # Read the key from the environment instead of hard-coding it in source:
    # a key committed to the repository is a credential leak and must be rotated.
    openai.api_key = os.environ.get("OPENAI_API_KEY")
    # gpt-3.5-turbo is a chat model: it must go through the ChatCompletion
    # endpoint with a `messages` list. The legacy Completion endpoint used
    # previously rejects chat models, so this call always failed.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=150,
    )
    # Chat completions return the text under message.content, not .text.
    return response.choices[0].message.content.strip()
|
|
|
|
|
def query_gemini(prompt):
    """Send *prompt* to Google's Gemini 1.5 Flash model and return the reply.

    Parameters
    ----------
    prompt : str
        User query, sent as a single text part.

    Returns
    -------
    str
        The generated text on success; otherwise a human-readable error
        string (HTTP failures) or a fallback message (unexpected payload).
    """
    # Key comes from the environment; never commit credentials to source.
    api_key = os.environ.get("GEMINI_API_KEY")
    url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent"
    headers = {
        'Content-Type': 'application/json'
    }
    data = {
        "contents": [{
            "parts": [{"text": prompt}]
        }]
    }

    # timeout keeps a stalled request from hanging the Streamlit worker forever
    response = requests.post(url, headers=headers, params={"key": api_key}, json=data, timeout=30)

    if response.status_code == 200:
        # The generateContent payload nests the reply under
        # candidates[0].content.parts[0].text. There is no top-level
        # 'content' key, so the old .get('content', ...) lookup always
        # returned the fallback string even on success.
        try:
            return response.json()["candidates"][0]["content"]["parts"][0]["text"]
        except (KeyError, IndexError, TypeError, ValueError):
            return 'No content returned'
    else:
        return f"Error: {response.status_code}, {response.text}"
|
|
|
|
|
# --- UI: side-by-side comparison of the two model back-ends ---
st.title("Compare AI Models: ChatGPT and Gemini")

prompt = st.text_area("Enter your query:")

# One button per back-end. Clicking a button either renders that model's
# answer under a subheader, or warns when the text area is empty.
for label, backend in (("ChatGPT", query_chatgpt), ("Gemini", query_gemini)):
    if st.button(label):
        if not prompt:
            st.warning("Please enter a prompt.")
        else:
            answer = backend(prompt)
            st.subheader(f"{label} Response:")
            st.write(answer)
|
|