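"""Streamlit app that evaluates customer/agent e-commerce conversations.

It prompts an OpenAI model (via LangChain) to decide whether the customer placed
an order, why, and what feedback they gave, returning the result as structured
JSON described by the ResponseSchema objects below.
"""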
import os

import streamlit as st
from langchain.llms import OpenAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema

# Set your OpenAI API key via the environment; never commit a real key to source control.
os.environ['OPENAI_API_KEY'] = 'sk-...'  # placeholder: replace with your own key
llm = OpenAI()
# Minimal local prompt-template helper: stores a template string and its input
# variable names, and fills the template with str.format().
class PromptTemplate:
    def __init__(self, input_variables, template):
        self.input_variables = input_variables
        self.template = template

    def format(self, **kwargs):
        return self.template.format(**kwargs)
response_schemas = [
    ResponseSchema(
        name="Order Placed",
        description="True if the customer placed an order and evidence exists, else False; 'Invalid Conversation' if the conversation is unclear, empty, or outside the e-commerce domain."),
    ResponseSchema(
        name="Reason",
        description="Select all that apply from the matching list. Order_Placed_Reasons_List: ['Agent Convincing Power', 'Agent Well Behavior', 'Customer Interest']. Order_Not_Placed_Reasons_List: ['Price', 'Agent did not make efforts to Engage Customer', 'Customer Requirements Unavailability', 'Agent Could not Convince', 'Customer shows Lack of Interest', 'Customer only Getting Information', 'Customer Preferred Other Website', 'Product Out of Stock', 'Customer leaves Conversation without Responding Properly']. Invalidation_Reasons_List: ['Empty Conversation', 'Out of Domain Text', 'Unclear Text']."),
    ResponseSchema(
        name="Customer Feedback",
        description="If the customer placed an order, select feedback from the list ['Facing Issues with Product', 'Disappointed with Product', 'Loves Product', 'No Feedback Available']; otherwise None if no order was placed."),
    # Earlier draft: "Provide ordering or not-ordering reasons (multiple) from the given list only [Customer Satisfaction, Agent Irresponsiveness, Agent Marketing Strategy, Seller Communication Gap, Customer Interest for Purchasing, Agent Well Behaving, Customer Lack of Interest]. Respond 'Non Processable Text' if any irrelevant text is provided."
]
# Customer didn't place an order because of: Price, Agent couldn't convince the Customer, Customer's lack of Interest, unclear chat
# Customer placed an order because of: Agent's convincing Power, Agent's Well-Behavior; otherwise: No Evidence or Unclear Chat
# Parser that defines the expected JSON fields and produces the matching format instructions for the prompt.
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
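# Illustrative example (not actual model output) of the JSON reply the schemas above describe:
# {
#     "Order Placed": "True",
#     "Reason": "Agent Convincing Power, Customer Interest",
#     "Customer Feedback": "Loves Product"
# }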
prompt = PromptTemplate(
    template="Analyze the provided Customer and Agent Conversation critically and do not overlap the Order Placed and Order Not Placed reasons.\n{format_instructions}\nConversation: '{text}'",
    input_variables=["format_instructions", "text"])
def Get_Inference(text_input):
    # Fill the module-level prompt template with the format instructions and the conversation text.
    formatted_prompt = prompt.format(format_instructions=format_instructions, text=text_input)
    print("\n\n\n\n", formatted_prompt)
    # Send the single prompt string to the LLM and get the raw response text.
    response = llm.invoke(formatted_prompt)
    # Strip the markdown code fence (```json ... ```) the model often wraps around its JSON.
    return str(response).replace('`', '').replace('json', '')
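# Illustrative usage (hypothetical conversation text):
#   raw = Get_Inference("Customer: I want the blue jacket in size M. Agent: Sure, I have placed the order for you.")
#   # `raw` should contain the JSON fields defined in response_schemas above.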
# Streamlit app
st.title("Customer/Sales Evaluation")
sentence = st.text_input("Enter Conversation:")
if sentence:
    results = Get_Inference(sentence)
    if len(results) > 0:
        st.write("Customer/Sales Inference:")
        st.write(results)
    else:
        st.write("No Inference Available.")