import os

import streamlit as st
from dotenv import load_dotenv
from user_utils import *
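
# The .env file loaded by load_dotenv() in main() is expected to provide the
# API keys this app relies on. PINECONE_API_KEY is read directly in main()
# below; OPENAI_API_KEY is an assumption about what the LLM helper in
# user_utils.py (not shown here) needs. For example:
#
#   OPENAI_API_KEY=sk-...
#   PINECONE_API_KEY=pcsk_...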

# Initialize session-state lists that hold the tickets submitted to each department
if 'HR_tickets' not in st.session_state:
    st.session_state['HR_tickets'] = []
if 'IT_tickets' not in st.session_state:
    st.session_state['IT_tickets'] = []
if 'Transport_tickets' not in st.session_state:
    st.session_state['Transport_tickets'] = []


def main():
    load_dotenv()

    st.header("Automatic Ticket Classification Tool")
    # Capture user input
    st.write("We are here to help you, please ask your question:")
    user_input = st.text_input("🔍")

    if user_input:

        # Create an embeddings instance
        embeddings = create_embeddings()

        # Connect to the Pinecone index that was populated by "Load_Data_Store.py".
        # The API key is read from the environment rather than hard-coded here;
        # "PINECONE_API_KEY" is the assumed name of the variable in the .env file.
        index = pull_from_pinecone(os.getenv("PINECONE_API_KEY"), "us-east-1", "ticket", embeddings)

        # Fetch the most relevant documents from our vector store (the Pinecone index)
        relevant_docs = get_similar_docs(index, user_input)

        # Get the LLM's answer, grounded in the retrieved documents
        response = get_answer(relevant_docs, user_input)
        st.write(response)

        button = st.button("Submit ticket?")

        if button:

            # Embed the user's query, reusing the embeddings instance created above
            query_result = embeddings.embed_query(user_input)

            # Load the ML model and predict the department this complaint belongs to
            department_value = predict(query_result)
            st.write("Your ticket has been submitted to: " + department_value)

            # Append the ticket to the matching list so it can be viewed/used later on
            if department_value == "HR":
                st.session_state['HR_tickets'].append(user_input)
            elif department_value == "IT":
                st.session_state['IT_tickets'].append(user_input)
            else:
                st.session_state['Transport_tickets'].append(user_input)


        
# Ensure proper script execution entry point
if __name__ == "__main__":
    main()
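

# ---------------------------------------------------------------------------
# For reference only: a minimal sketch of what the helpers imported from
# user_utils might look like. user_utils.py is not shown here, so everything
# below is inferred from how the functions are called above and is NOT the
# project's confirmed implementation. It assumes the classic (pre-0.1)
# LangChain API and the pre-v3 pinecone-client; the "all-MiniLM-L6-v2"
# checkpoint and the "modelsvm.pk1" classifier path are placeholders. Being
# defined after the entry point, this block has no effect on the app above.
# ---------------------------------------------------------------------------

import joblib
import pinecone
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import Pinecone


def create_embeddings():
    # Sentence-transformer embeddings, shared by indexing, querying and the classifier
    return SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")


def pull_from_pinecone(pinecone_apikey, pinecone_environment, pinecone_index_name, embeddings):
    # Connect to an existing Pinecone index and wrap it as a LangChain vector store
    pinecone.init(api_key=pinecone_apikey, environment=pinecone_environment)
    return Pinecone.from_existing_index(pinecone_index_name, embeddings)


def get_similar_docs(index, query, k=2):
    # Semantic search: return the k documents closest to the user's question
    return index.similarity_search(query, k=k)


def get_answer(docs, user_input):
    # "Stuff" the retrieved documents into a QA chain and let the LLM answer
    chain = load_qa_chain(OpenAI(), chain_type="stuff")
    return chain.run(input_documents=docs, question=user_input)


def predict(query_result):
    # Classify the embedded complaint with a previously trained model
    # (e.g. an SVM persisted with joblib) and return the department label
    model = joblib.load("modelsvm.pk1")
    return model.predict([query_result])[0]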