import os

import streamlit as st
from dotenv import load_dotenv
from user_utils import *
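
# The helpers imported from user_utils live in a separate module; the
# signatures sketched below are assumed from how they are called in main(),
# not taken from that file:
#   create_embeddings()                               -> embeddings object
#   pull_from_pinecone(api_key, region, index, emb)   -> Pinecone vector store
#   get_similar_docs(index, query)                    -> most relevant documents
#   get_answer(docs, query)                           -> answer string from an LLM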

# Environment variables (in particular the Pinecone API key used below) are
# read from a local .env file by load_dotenv() at the start of main().

# The question answering itself is performed by a pre-trained LLM through the
# get_answer helper; no model is initialized directly in this file.

def main():
    load_dotenv()

    st.header("Automatic Ticket Classification Tool")

    # Capture the user's question
    st.write("We are here to help you, please ask your question:")
    user_input = st.text_input("🔍")

    if user_input:

        # Create the embeddings instance used to embed the user's query
        embeddings = create_embeddings()

        # Connect to the existing Pinecone index populated by "Load_Data_Store.py".
        # The API key is read from the environment rather than hard-coded; the
        # variable name PINECONE_API_KEY is an assumption.
        index = pull_from_pinecone(os.environ["PINECONE_API_KEY"], "us-east-1", "ticket", embeddings)

        # Fetch the most relevant documents from the vector store (Pinecone index)
        relevant_docs = get_similar_docs(index, user_input)

        # Generate the final answer with the LLM, grounded in the retrieved documents
        response = get_answer(relevant_docs, user_input)
        st.write(response)

        
# Run the app when this script is executed directly
if __name__ == "__main__":
    main()
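

# ---------------------------------------------------------------------------
# For reference, a minimal sketch of what the user_utils helpers could look
# like. This is an assumption-based illustration, not the repository's actual
# user_utils.py: it presumes an older LangChain / pinecone-client API
# (pinecone.init, Pinecone.from_existing_index, load_qa_chain) and a
# SentenceTransformer embedding model. It is kept commented out so it does not
# shadow the real helpers imported at the top of this file.
#
# import pinecone
# from langchain.vectorstores import Pinecone
# from langchain.embeddings import SentenceTransformerEmbeddings
# from langchain.llms import OpenAI
# from langchain.chains.question_answering import load_qa_chain
#
# def create_embeddings():
#     # Embedding model applied to both the stored tickets and the user query
#     return SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
#
# def pull_from_pinecone(api_key, environment, index_name, embeddings):
#     # Connect to an existing Pinecone index and wrap it as a vector store
#     pinecone.init(api_key=api_key, environment=environment)
#     return Pinecone.from_existing_index(index_name, embeddings)
#
# def get_similar_docs(index, query, k=2):
#     # Return the k stored documents most similar to the query
#     return index.similarity_search(query, k=k)
#
# def get_answer(docs, query):
#     # Let the LLM answer the query using only the retrieved documents
#     chain = load_qa_chain(OpenAI(), chain_type="stuff")
#     return chain.run(input_documents=docs, question=query)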