import os
import json

import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
# NOTE: prefer loading the key from st.secrets or the environment instead of hardcoding it.
os.environ['GOOGLE_API_KEY'] = 'AIzaSyAJjb0Koe8IdFWQB8jwaVTrwelav20wkMY'
llm = ChatGoogleGenerativeAI(model="gemini-pro")
# Minimal stand-in for LangChain's PromptTemplate: only simple str.format substitution is needed here.
class PromptTemplate:
    def __init__(self, input_variables, template):
        self.input_variables = input_variables
        self.template = template

    def format(self, **kwargs):
        return self.template.format(**kwargs)
response_schemas = [
    ResponseSchema(name="Order Placed", description="True if the customer placed an order and evidence exists, else False"),
    ResponseSchema(name="Reason", description="Select one from the applicable list: Order_Placed_Reasons_List['Agent Attitude','Customer Interest'] Order_not_Placed_Reasons_List['High Price','Agent Attitude','Product unavailability','Customer Lack of Interest','Customer only Getting Information']"),
    ResponseSchema(name="Customer Feedback", description="If the customer gives feedback, select from ['Disappointed with Product','Appreciate Product'], else None"),
    ResponseSchema(name="Demands", description="If the customer makes a demand, select from ['Refunds','Replacement','Confusion Clarification'], else None")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
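# For reference, get_format_instructions() expands to LangChain's standard structured-output
# preamble, roughly: "The output should be a markdown code snippet formatted in the following
# schema, including the leading and trailing '```json' and '```'", followed by the field names
# and descriptions defined above. (Paraphrased sketch of the LangChain boilerplate, not the
# verbatim text.)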
prompt = PromptTemplate(
    template="Analyze the provided Customer and Agent Summarized Conversation.\n{format_instructions}\nConversation:' {text} '",
    input_variables=["format_instructions", "text"])
def Get_Inference(text_input):
    llm = ChatGoogleGenerativeAI(model="gemini-pro")
    prompt = PromptTemplate(
        template="Analyze the provided Customer and Agent Conversation in Mixed English and Roman Urdu Critically.\n{format_instructions}\nConversation:' {text} '",
        input_variables=["format_instructions", "text"])
    # Build the full prompt from the format instructions and the conversation text
    formatted_prompt = prompt.format(format_instructions=format_instructions, text=text_input)
    # Send the prompt to the LLM and parse the fenced JSON block it returns
    response = llm.invoke(formatted_prompt)
    data = output_parser.parse(response.content)
    # Access individual values
    order_placed = data["Order Placed"]
    reason = data["Reason"]
    customer_feedback = data["Customer Feedback"]
    demands = data["Demands"]
    return order_placed, reason, customer_feedback, demands
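# Usage sketch (outside Streamlit), assuming GOOGLE_API_KEY is valid and the model returns
# JSON matching the schemas above; the conversation text is made up for illustration:
#   order_placed, reason, feedback, demands = Get_Inference(
#       "Customer: Do you have the blue kettle in stock? Agent: Yes, shall I place the order? Customer: Yes please.")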
st.title("Customer/Sales Evaluation")
sentence = st.text_input("Enter Conversation")
if sentence:
    results = Get_Inference(sentence)
    if len(results) > 0:
        st.write("Customer/Sales Inference:")
        st.write('Order Placed : ', results[0])
        st.write('Reason : ', results[1])
        st.write('Feedback : ', results[2])
        st.write('Demand : ', results[3])
    else:
        st.write("No Inference Available.")