sarwarshafee8709809365 committed
Commit 6011d47 · 1 Parent(s): c8e458d

data_store system added

app.py CHANGED
@@ -19,7 +19,11 @@ def new_chat():
         "assistant_state": "primary_assistant",
         "previous_state": "primary_assistant",
         "tts_audio": None,
-    }, []
+    }
+
+def button_pressed():
+    new_state = new_chat()
+    return new_state, None, [], None, None
 
 # Main processing function
 def run(audio, state):
@@ -39,21 +43,22 @@ def run(audio, state):
     _printed = set()
     assistant_states, assistant_messages = _print_event(value, _printed)
     assistant_message = assistant_messages.content
-    print("State:", assistant_states)
-    print("Message:", assistant_messages)
+    if assistant_messages.content != "":
+        assistant_message_true = assistant_messages.content
+    else:
+        assistant_message_true = "..."
     if assistant_states is None:
         state["assistant_state"] = state["previous_state"]
     else:
         state["previous_state"] = assistant_states
         state["assistant_state"] = assistant_states
-    if assistant_states is None and "tool_call_id" not in assistant_messages:
+    if assistant_message != "" and assistant_states is None and "tool_call_id" not in assistant_messages:
         state["tts_audio"] = tts(assistant_message)
     if assistant_message == "" and assistant_states is None:
-        # print("\u001b[31mTool Call ID:\u001b[0m", assistant_messages.additional_kwargs)
         state["tool_output"] = assistant_messages.additional_kwargs["tool_calls"]
 
     state["message_history"].append({"role": "user", "content": user_input})
-    state["message_history"].append({"role": "assistant", "content": assistant_message})
+    state["message_history"].append({"role": "assistant", "content": assistant_message_true})
 
     return (
         state["assistant_state"],
@@ -86,10 +91,10 @@ with gr.Blocks() as demo:
         outputs=[assistant_state_output, chatbot, tts_output, audio_input, tool_output],
     )
 
-    button = gr.Button("Start Chat/New Chat")
+    button = gr.Button("Click Here to Start a Chat")
     button.click(
-        fn=new_chat,
-        outputs=[chatbot_state, chatbot]  # Reset state
+        fn=button_pressed,
+        outputs=[chatbot_state, assistant_state_output, chatbot, tts_output, tool_output]  # Reset state
    )
-
+    # new_chat()
     demo.launch(share=True)
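
Note on the reset wiring: Gradio maps each value returned by the click handler positionally onto the components listed in outputs, which is why button_pressed returns five values for the five components. A minimal, self-contained sketch of the same pattern (the component constructors below are assumptions for illustration, not the app's actual definitions):

import gradio as gr

def new_chat():
    # Fresh conversation state, mirroring the shape used in app.py
    return {
        "assistant_state": "primary_assistant",
        "previous_state": "primary_assistant",
        "tts_audio": None,
    }

def button_pressed():
    # One return value per component in `outputs`, in order
    return new_chat(), None, [], None, None

with gr.Blocks() as demo:
    chatbot_state = gr.State(new_chat())
    assistant_state_output = gr.Textbox(label="Assistant state")
    chatbot = gr.Chatbot()
    tts_output = gr.Audio()
    tool_output = gr.JSON()
    button = gr.Button("Click Here to Start a Chat")
    button.click(
        fn=button_pressed,
        outputs=[chatbot_state, assistant_state_output, chatbot, tts_output, tool_output],
    )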
erp_core/Tools/__pycache__/finalcial_management.cpython-311.pyc CHANGED
Binary files a/erp_core/Tools/__pycache__/finalcial_management.cpython-311.pyc and b/erp_core/Tools/__pycache__/finalcial_management.cpython-311.pyc differ
 
erp_core/Tools/finalcial_management.py CHANGED
@@ -1,26 +1,39 @@
 from langchain_core.tools import tool
-
+import csv
 @tool
-def register_purchase_request(user_info: str):
+def register_purchase_request(product: str, price: float):
     """Register a purchase request."""
+    try:
+        with open("purchase_requests.csv", "a") as f:
+            writer = csv.writer(f)
+            writer.writerow([product, price])
+    except Exception as e:
+        pass
+
     return {
         "dialog_state": ["Financial_Management"],
         "messages": [
             {
                 "type": "text",
-                "content": "Registering a purchase request"
+                "content": f"Registering a purchase request for {product} at {price}"
             }
         ]
     }
 @tool
-def view_expense_report(user_info: str):
+def view_expense_report(info: str):
     """View an expense report."""
+    try:
+        with open("expense_reports.csv", "r") as f:
+            reader = csv.reader(f)
+            expense_reports = list(reader)
+    except Exception as e:
+        expense_reports = []
     return {
         "dialog_state": ["Financial_Management"],
         "messages": [
             {
                 "type": "text",
-                "content": "Viewing an expense report"
+                "content": f"Expense report: {expense_reports}"
             }
         ]
     }
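
Usage sketch for the updated tools: functions decorated with @tool become LangChain StructuredTool objects, so callers invoke them with a dict of arguments. This assumes the module path from the diff; the argument values are illustrative.

from erp_core.Tools.finalcial_management import register_purchase_request

result = register_purchase_request.invoke({"product": "laptop", "price": 999.0})
print(result["messages"][0]["content"])
# -> Registering a purchase request for laptop at 999.0
# Side effect: appends the row "laptop,999.0" to purchase_requests.csv

One design caveat: the bare "except Exception: pass" in register_purchase_request silently discards failed writes, so the tool can report success even when nothing was persisted.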
erp_core/__pycache__/_event.cpython-311.pyc CHANGED
Binary files a/erp_core/__pycache__/_event.cpython-311.pyc and b/erp_core/__pycache__/_event.cpython-311.pyc differ
 
erp_core/__pycache__/_llm.cpython-311.pyc CHANGED
Binary files a/erp_core/__pycache__/_llm.cpython-311.pyc and b/erp_core/__pycache__/_llm.cpython-311.pyc differ
 
erp_core/__pycache__/asr_and_tts.cpython-311.pyc CHANGED
Binary files a/erp_core/__pycache__/asr_and_tts.cpython-311.pyc and b/erp_core/__pycache__/asr_and_tts.cpython-311.pyc differ
 
erp_core/_event.py CHANGED
@@ -24,8 +24,8 @@ def create_tool_node_with_fallback(tools: list) -> dict:
 
 def _print_event(event: dict, _printed: set, max_length=1500):
     current_state = event.get("dialog_state")
-    # if current_state:
-    #     print("Currently in: ", current_state)
+    if current_state:
+        print("Currently in: ", current_state)
     message = event.get("messages")
     if message:
         if isinstance(message, list):
@@ -34,6 +34,6 @@ def _print_event(event: dict, _printed: set, max_length=1500):
             msg_repr = message.pretty_repr(html=True)
             if len(msg_repr) > max_length:
                 msg_repr = msg_repr[:max_length] + " ... (truncated)"
-            # print(msg_repr)
+            print(msg_repr)
             _printed.add(message.id)
     return current_state, message
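
Illustrative call for the re-enabled logging (the event shape is inferred from how run() in app.py consumes this helper; the message values are made up):

from langchain_core.messages import AIMessage
from erp_core._event import _print_event

event = {
    "dialog_state": ["Financial_Management"],
    "messages": AIMessage(content="Done.", id="msg-1"),
}
_printed = set()
current_state, message = _print_event(event, _printed)
# Prints "Currently in:  ['Financial_Management']" and the message repr,
# then returns both values unchanged for the caller to use.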
erp_core/asr_and_tts.py CHANGED
@@ -3,16 +3,16 @@ import os
 import tempfile
 import scipy.io.wavfile as wavfile
 from openai import OpenAI
-from elevenlabs import ElevenLabs, VoiceSettings, play, stream
+# from elevenlabs import ElevenLabs, VoiceSettings, play, stream
 
 # Load API keys from .env file
 # load_dotenv(override=True)
 openai_api_key = os.getenv('OPENAI_API_KEY')
-elevenlabs_api_key = os.getenv('ELEVENLABS_API_KEY')
+# elevenlabs_api_key = os.getenv('ELEVENLABS_API_KEY')
 
 # Initialize clients
 openai_client = OpenAI()
-elevenlabs_client = ElevenLabs(api_key=elevenlabs_api_key)
+# elevenlabs_client = ElevenLabs(api_key=elevenlabs_api_key)
 
 # Function to transcribe audio using OpenAI Whisper API
 def transcribe(audio):
@@ -40,23 +40,34 @@ def transcribe(audio):
 
 def tts(response_text):
     # Now, use ElevenLabs to convert the transcription text to speech
-    tts_response = elevenlabs_client.text_to_speech.convert(
-        voice_id="CwhRBWXzGAHq8TQ4Fs17",
-        optimize_streaming_latency="0",
-        output_format="mp3_22050_32",
-        text=response_text,
-        voice_settings=VoiceSettings(
-            stability=0.1,
-            similarity_boost=0.3,
-            style=0.2,
-        ),
-    )
+    # tts_response = elevenlabs_client.text_to_speech.convert(
+    #     voice_id="CwhRBWXzGAHq8TQ4Fs17",
+    #     optimize_streaming_latency="0",
+    #     output_format="mp3_22050_32",
+    #     text=response_text,
+    #     voice_settings=VoiceSettings(
+    #         stability=0.1,
+    #         similarity_boost=0.3,
+    #         style=0.2,
+    #     ),
+    # )
 
-    audio_file_path = "output_audio.mp3"
-    with open(audio_file_path, "wb") as audio_file:
-        for chunk in tts_response:
-            audio_file.write(chunk)
+    # audio_file_path = "output_audio.mp3"
+    # with open(audio_file_path, "wb") as audio_file:
+    #     for chunk in tts_response:
+    #         audio_file.write(chunk)
 
-    return audio_file_path
+    # return audio_file_path
+
+    tts_client = OpenAI()
+
+    response = tts_client.audio.speech.create(
+        model="tts-1",
+        voice="onyx",
+        input=response_text,
+    )
+
+    response.stream_to_file("output.mp3")
+    return "output.mp3"
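One caveat on the new TTS path: in recent openai-python releases, stream_to_file on the plain response object is deprecated in favor of the streaming-response wrapper. A sketch of the equivalent call (same model and voice as the diff; the input string is illustrative):

from openai import OpenAI

client = OpenAI()
with client.audio.speech.with_streaming_response.create(
    model="tts-1",
    voice="onyx",
    input="Hello from the ERP assistant.",
) as response:
    response.stream_to_file("output.mp3")
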
erp_core/runnable/__pycache__/fm_prompt.cpython-311.pyc CHANGED
Binary files a/erp_core/runnable/__pycache__/fm_prompt.cpython-311.pyc and b/erp_core/runnable/__pycache__/fm_prompt.cpython-311.pyc differ
 
erp_core/runnable/fm_prompt.py CHANGED
@@ -17,6 +17,7 @@ financial_management_prompt = ChatPromptTemplate.from_messages(
     "While ready to call tool ask the user for confirmation once again by repeating the user's query. This is very important"
     "If the user confirms that it is correct only then call proper tool to solve user query. It is very important."
     "Remember that an issue isn't resolved until the relevant tool or method has successfully been used."
+    "Remember always provide a response while calling a tool or after calling a tool."
     "\nCurrent time: {time}."
     '\n\nIf the user needs help, and none of your tools are appropriate for it, then "CompleteOrEscalate" the dialog to the host assistant.'
     "Do not make up invalid tools or functions."
output.mp3 ADDED
Binary file (79.2 kB).
 
purchase_requests.csv ADDED
@@ -0,0 +1 @@
+Product,Price
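
Read-back sketch for the new CSV (the header row added here pairs with the rows that register_purchase_request appends):

import csv

with open("purchase_requests.csv", newline="") as f:
    for row in csv.DictReader(f):
        print(row["Product"], row["Price"])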