# Streamlit app: three-way comparison of AI model responses (Gemini, GPT, Llama).
import streamlit as st

from aiutils import ask_gemini, ask_gpt, ask_llama
def main() -> None:
    """Render the Streamlit UI: pick a task, send the matching prompt to three
    AI models, and show each model's response and response time in tabs.

    Side effects only (writes to the Streamlit page); returns nothing.
    """
    # App title
    st.title("Three-way Comparison of AI Models")

    with st.expander("ℹ️ About"):
        st.write(
            """
            This app compares the outputs of three AI models: Gemini 2.0,
            OpenAI GPT 4.0, and Meta Llama 3.2 on a given prompt. The response time of each model is also displayed.
            """
        )

    st.write("Programmed by Louie F. Cervantes, M.Eng.(Information Engineering).")

    # Step 1: Task selection
    st.header("Select Task")
    task = st.selectbox(
        "What do you want to generate?",
        ["Generate a syllabus", "Generate an exam", "Create a program"],
    )

    # Step 2: Build the prompt that matches the selected task.
    prompt = ""
    if task == "Generate a syllabus":
        prompt = """Generate a syllabus for the course Intelligent Systems
        for 3rd year Computer Science students of the College of ICT of
        West Visayas State University. The course will use the book
        'Building Intelligent Systems Using Machine Learning and
        Deep Learning'. CCS 229 is a 3-unit lecture
        course that will run for 18 weeks (54 hours)."""
    elif task == "Generate an exam":
        prompt = """Generate a 10-item multiple choice test with 4 options
        on the topic Machine Learning. The test should require higher order
        thinking skills and should be appropriate for 3rd year
        Computer Science students."""
    elif task == "Create a program":
        prompt = """Create a streamlit app that generates a random password
        that complies with the following requirements: 1) at least 8 characters
        2) at least one uppercase letter 3) at least one lowercase letter
        4) at least one number 5) at least one special character."""

    st.write(f"Prompt: {prompt}")

    if st.button("Ask AI Models"):
        st.write("Please wait. This is a compute intensive process...")
        with st.spinner("Asking AI Models..."):
            # Query each model; each helper returns (response_text, response_time).
            gemini_response, gemini_rt = ask_gemini(prompt)
            gpt_response, gpt_rt = ask_gpt(prompt)
            llama_response, llama_rt = ask_llama(prompt)

            # Step 3: Display responses in tabs, one per model.
            st.header("AI Model Outputs")
            tab1, tab2, tab3 = st.tabs(["Gemini 2.0", "OpenAI GPT 4.0", "Meta Llama 3.2"])

            with tab1:
                st.subheader("Gemini 2.0")
                st.write(f"Response Time: {gemini_rt}")
                st.write(gemini_response)

            with tab2:
                st.subheader("OpenAI GPT 4.0")
                st.write(f"Response Time: {gpt_rt}")
                st.write(gpt_response)

            with tab3:
                st.subheader("Meta Llama 3.2")
                st.write(f"Response Time: {llama_rt}")
                st.write(llama_response)
# Run the app only when this file is executed as a script, not on import.
if __name__ == "__main__":
    main()