import streamlit as st
from dotenv import load_dotenv
from transformers import pipeline
# Load environment variables
load_dotenv()
# Initialize the question-answering pipeline with a pre-trained model
qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")

def fine_tune_extraction(content, query):
    # Use the QA pipeline to answer the question from the document content
    result = qa_pipeline(question=query, context=content)
    return result['answer']
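

# --- Retrieval helpers --------------------------------------------------------
# Minimal sketches of the helpers main() relies on (create_embeddings,
# pull_from_pinecone, get_similar_docs). They assume the pre-3.0
# pinecone-client, the classic langchain Pinecone vectorstore wrapper, and the
# sentence-transformers "all-MiniLM-L6-v2" embedding model; the surrounding
# project may define these differently, so treat this as an illustrative sketch.
import pinecone
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Pinecone


def create_embeddings():
    # Embedding model used to encode queries against the existing index
    return SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")


def pull_from_pinecone(api_key, environment, index_name, embeddings):
    # Connect to an existing Pinecone index and wrap it as a langchain vectorstore
    pinecone.init(api_key=api_key, environment=environment)
    return Pinecone.from_existing_index(index_name, embeddings)


def get_similar_docs(index, query, k=2):
    # Return the k documents most similar to the user's query
    return index.similarity_search(query, k=k)
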
def main():
    st.header("Automatic Ticket Classification Tool")

    # Capture user input
    st.write("We are here to help you, please ask your question:")
    user_input = st.text_input("🔍")

    if user_input:
        try:
            # create_embeddings and pull_from_pinecone are sketched above;
            # the deployed project may supply its own implementations.
            embeddings = create_embeddings()

            # Fetch the Pinecone index using the API key and environment info
            index = pull_from_pinecone(
                "pcsk_4etRhj_Lc37c2KWzUgdTSPaShQKgxeZvC331qJcVWjK9LfpDARwkG23kXZoN5ZCHVLyYWZ",
                "gcp-starter",
                "ticket",
                embeddings
            )

            # Fetch the relevant documents based on the user input
            relevant_docs = get_similar_docs(index, user_input)

            # Process and display relevant documents
            if relevant_docs:
                st.write("**Relevant Information:**")
                for i, doc in enumerate(relevant_docs, 1):
                    # Safely access the document content
                    content = getattr(doc, "page_content", "No content available.")

                    # Apply the extractive QA model to pull the relevant answer span
                    relevant_info = fine_tune_extraction(content, user_input)

                    if relevant_info:
                        st.write(f"**Document {i}:**")
                        st.write(f"```{relevant_info}...```")
                    else:
                        st.write(f"**Document {i}:** No relevant information found.")
            else:
                st.write("No relevant documents found.")
        except Exception as e:
            st.write(f"An error occurred while processing your request: {e}")


# Script entry point
if __name__ == "__main__":
    main()
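
# To launch the UI locally, the usual Streamlit workflow applies (assuming
# streamlit, python-dotenv, transformers, pinecone-client and langchain are
# installed):
#
#     streamlit run app.py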