# Streamlit app: Nudge Generator (Gemma 2b via Ollama).
# (Lines above were hosting-page residue from the original scrape.)
import streamlit as st
from langchain_ollama import OllamaLLM
from transformers import AutoTokenizer, AutoModelForCausalLM
def main():
    """Render the Nudge Generator page and produce coaching nudges via Gemma 2b (Ollama).

    Flow: configure the page, show title/logo, collect a free-text prompt,
    and on button press send the prompt to the local `gemma:2b` Ollama model
    and display its response.
    """
    # Set up the page.
    st.set_page_config(
        page_title="Nudge Generator - Gemma 2b",
        page_icon="orYx logo.png",
        layout="wide",
    )

    # Title and logo, side by side.
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("Nudge Generator - Gemma 2b")
    with col2:
        # NOTE(review): `use_column_width` is deprecated in recent Streamlit
        # releases in favor of `use_container_width` — confirm the deployed
        # Streamlit version before switching.
        st.image("orYx logo.png", use_column_width=True)

    # Chat interface.
    st.markdown("---")
    st.header("Chat Interface")

    # Input for user-provided assessment data.
    prompt = st.text_area("Enter the prompt here:")

    # Button to generate the nudge.
    if st.button("Generate Nudge"):
        # Bug fix: the original tested the undefined name `S_boss`, which
        # raised NameError on every click. The user input lives in `prompt`.
        if prompt.strip():
            with st.spinner("Generating nudges..."):
                # Construct the client lazily, only when a generation is
                # actually requested, instead of on every Streamlit rerun.
                # (The original also loaded google/gemma-2-2b-it through
                # transformers on each rerun and never used it — removed.)
                gemma_model = OllamaLLM(model='gemma:2b')
                # Generate the response using the Ollama LLM.
                response = gemma_model.invoke(input=f"I want you to analyze the {prompt}. Which contains top 3 strengths or weaknesses of a person being assessed. You will generate nudges for improving upon these strengths or fixing upon these weaknesses. If you don't find any data, just respond as - No data available.")
            st.success("Nudges generated successfully!")
            st.text_area("Generated Nudges:", response, height=200)
        else:
            st.warning("Please enter data to generate nudges.")


if __name__ == "__main__":
    main()