darthPanda committed • Commit b9970ea
1 Parent(s): e78c815

pre-alpha-release-v0.0
- .gitignore +2 -0
- __pycache__/app.cpython-39.pyc +0 -0
- __pycache__/asr_openai.cpython-39.pyc +0 -0
- __pycache__/falcon_7b_llm.cpython-39.pyc +0 -0
- __pycache__/tts_elevenlabs.cpython-39.pyc +0 -0
- app.py +58 -0
- asr_openai.py +30 -0
- data/falcon_logo.png +0 -0
- data/falcon_logo_transparent.png +0 -0
- data/user_avatar_logo.png +0 -0
- falcon_7b_llm.py +150 -0
- requirements.txt +12 -0
- tts_elevenlabs.py +36 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.env
+wandb
__pycache__/app.cpython-39.pyc
ADDED
Binary file (1.93 kB)

__pycache__/asr_openai.cpython-39.pyc
ADDED
Binary file (1.13 kB)

__pycache__/falcon_7b_llm.cpython-39.pyc
ADDED
Binary file (4.5 kB)

__pycache__/tts_elevenlabs.cpython-39.pyc
ADDED
Binary file (1.27 kB)
app.py
ADDED
@@ -0,0 +1,58 @@
+import gradio as gr
+from asr_openai import AutomaticSpeechRecognition
+from tts_elevenlabs import ElevenLabsTTS
+from falcon_7b_llm import Falcon_7b_llm
+import logging
+import os
+
+logging.basicConfig(level=logging.INFO)
+
+def delete_files_in_folder(folder_path):
+    for filename in os.listdir(folder_path):
+        file_path = os.path.join(folder_path, filename)
+
+        # Check if it's a file (and not a directory)
+        if os.path.isfile(file_path):
+            os.remove(file_path)
+
+def generate_response(input_audio):
+    sentence = asr.run_transcription(input_audio)
+    # sentence = 'how are you?'
+    print(sentence)
+    llm_response = llm.get_llm_response(sentence['text'])
+    output_audio = tts.tts_generate_audio(llm_response)
+    # output_audio = tts.tts_generate_audio(sentence)
+    chatbot_history.append(((input_audio,), (output_audio,)))
+    return chatbot_history
+
+delete_files_in_folder('data//tts_responses')
+
+title = "<h1 style='text-align: center; color: #ffffff; font-size: 40px;'> 🦅 Falcon Barista"
+
+asr = AutomaticSpeechRecognition()
+tts = ElevenLabsTTS()
+llm = Falcon_7b_llm()
+chatbot_history = []
+
+def restart_chat():
+    delete_files_in_folder('data//tts_responses')
+    global chatbot_history
+    chatbot_history = []
+    tts.restart_state()
+    llm.restart_state()
+    return chatbot_history
+
+with gr.Blocks() as demo:
+    gr.Markdown(title)
+    with gr.Row():
+        gr.Image('data//falcon.png', label="Look how cute is Falcon Barista")
+        with gr.Column():
+            chatbot = gr.Chatbot(label='Chat with Falcon Barista', avatar_images=('data//user_avatar_logo.png','data//falcon_logo_transparent.png'))
+            with gr.Row():
+                mic = gr.Audio(source="microphone", type='filepath', scale=3)
+                mic.stop_recording(generate_response, mic, chatbot)
+                restart_btn = gr.Button(value="Restart Chat", scale=1)
+                restart_btn.click(restart_chat, outputs=[chatbot])
+
+if __name__ == "__main__":
+    demo.launch()
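app.py wipes data//tts_responses at startup and the imported modules read their keys from a .env file, so both must exist before launch. A minimal preflight sketch, not part of the commit; the key names come from the modules in this commit, everything else is illustrative:

# Preflight check before `python app.py` (illustrative, not in the commit)
import os
from dotenv import load_dotenv

load_dotenv()  # the modules below expect these keys in .env
for key in ("OPENAI_API_KEY", "ELEVEN_LABS_API_KEY", "RUNPOD_API_KEY",
            "WANDB_API_KEY", "POD_ID"):
    if not os.getenv(key):
        raise RuntimeError(f"missing {key} (expected in .env)")

# delete_files_in_folder('data//tts_responses') raises if the folder is absent
os.makedirs("data/tts_responses", exist_ok=True)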
asr_openai.py
ADDED
@@ -0,0 +1,30 @@
+import openai
+import os
+import time
+import logging
+from dotenv import load_dotenv
+
+# Load the .env file
+load_dotenv()
+
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+class AutomaticSpeechRecognition():
+    """
+    Class for automatic speech recognition(ASR).
+
+    This class uses faster whisper model for low latency ASR
+
+    Args:
+        model_size: size of model (small, base, etc.)
+    """
+    def __init__(self):
+        pass
+
+    def run_transcription(self, filepath):
+        audio_file= open(filepath, "rb")
+        sentence = openai.Audio.transcribe("whisper-1", audio_file)
+
+        logging.debug(f'transcription: {sentence}')
+
+        return sentence
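A hedged usage sketch for the class above; "data/sample.wav" is a placeholder path, and the ["text"] access mirrors how app.py consumes the result of the v0.x openai.Audio.transcribe call:

from asr_openai import AutomaticSpeechRecognition

asr = AutomaticSpeechRecognition()
result = asr.run_transcription("data/sample.wav")  # placeholder audio file
print(result["text"])  # app.py reads the transcript from the "text" field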
data/falcon_logo.png
ADDED
data/falcon_logo_transparent.png
ADDED
data/user_avatar_logo.png
ADDED
falcon_7b_llm.py
ADDED
@@ -0,0 +1,150 @@
+from langchain.memory import ConversationBufferMemory
+from langchain.prompts import PromptTemplate
+from langchain.chains import ConversationChain
+import os
+import runpod
+from dotenv import load_dotenv
+from langchain.llms import HuggingFaceTextGenInference
+from langchain.schema import BaseOutputParser
+import re
+import re
+from typing import List
+from langchain.schema import BaseOutputParser
+import torch
+from transformers import (
+    AutoTokenizer,
+    StoppingCriteria,
+)
+
+# Load the .env file
+load_dotenv()
+
+# Get the API key from the environment variable
+runpod.api_key = os.getenv("RUNPOD_API_KEY")
+os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
+os.environ["WANDB_PROJECT"] = "falcon_hackathon"
+os.environ["WANDB_API_KEY"] = os.getenv("WANDB_API_KEY")
+pod_id = os.getenv("POD_ID")
+
+class CleanupOutputParser(BaseOutputParser):
+    def parse(self, text: str) -> str:
+        user_pattern = r"\nUser"
+        text = re.sub(user_pattern, "", text)
+        human_pattern = r"\nHuman:"
+        text = re.sub(human_pattern, "", text)
+        ai_pattern = r"\nAI:"
+        return re.sub(ai_pattern, "", text).strip()
+
+    @property
+    def _type(self) -> str:
+        return "output_parser"
+
+class StopGenerationCriteria(StoppingCriteria):
+    def __init__(
+        self, tokens: List[List[str]], tokenizer: AutoTokenizer, device: torch.device
+    ):
+        stop_token_ids = [tokenizer.convert_tokens_to_ids(t) for t in tokens]
+        self.stop_token_ids = [
+            torch.tensor(x, dtype=torch.long, device=device) for x in stop_token_ids
+        ]
+
+    def __call__(
+        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
+    ) -> bool:
+        for stop_ids in self.stop_token_ids:
+            if torch.eq(input_ids[0][-len(stop_ids) :], stop_ids).all():
+                return True
+        return False
+
+
+
+class Falcon_7b_llm():
+    def __init__(self):
+        inference_server_url_cloud = f"https://{pod_id}-80.proxy.runpod.net"
+
+        template = """You are a chatbot called 'Falcon Barista' working at a coffee shop.
+        Your primary function is to take orders from customers.
+        Start with a greeting.
+        You have the following menu with prices. Dont mention the price unless asked. Do not take order for anything other than in menu.
+        - cappucino-5$
+        - latte-3$
+        - frappucino-8$
+        - juice-3$
+        If user orders something else, apologise that you dont have that item.
+        Take the order politely and in a frienldy way. After that confirm the order, tell the order price and say "Goodbye have a nice day".
+
+        {chat_history}
+        Human: {human_input}
+        AI:"""
+
+        prompt = PromptTemplate(
+            input_variables=["chat_history", "human_input"], template=template
+        )
+        memory = ConversationBufferMemory(memory_key="chat_history")
+
+        llm_cloud = HuggingFaceTextGenInference(
+            inference_server_url=inference_server_url_cloud,
+            max_new_tokens=200,
+            top_k=10,
+            top_p=0.95,
+            typical_p=0.95,
+            temperature=0.01,
+            repetition_penalty=1.0,
+            stop_sequences = ['Mini', 'AI', 'Human', ':']
+        )
+
+        self.llm_chain_cloud = ConversationChain(
+            prompt=prompt,
+            llm=llm_cloud,
+            verbose=True,
+            memory=memory,
+            output_parser=CleanupOutputParser(),
+            input_key='human_input'
+        )
+
+    def restart_state(self):
+        inference_server_url_cloud = f"https://{pod_id}-80.proxy.runpod.net"
+
+        template = """You are a chatbot called 'Falcon Barista' working at a coffee shop.
+        Your primary function is to take orders from customers.
+        Start with a greeting.
+        You have the following menu with prices. Dont mention the price unless asked. Do not take order for anything other than in menu.
+        - cappucino-5$
+        - latte-3$
+        - frappucino-8$
+        - juice-3$
+        If user orders something else, apologise that you dont have that item.
+        Take the order politely and in a frienldy way. After that confirm the order, tell the order price and say "Goodbye have a nice day".
+
+        {chat_history}
+        Human: {human_input}
+        AI:"""
+
+        prompt = PromptTemplate(
+            input_variables=["chat_history", "human_input"], template=template
+        )
+        memory = ConversationBufferMemory(memory_key="chat_history")
+
+        llm_cloud = HuggingFaceTextGenInference(
+            inference_server_url=inference_server_url_cloud,
+            max_new_tokens=200,
+            top_k=10,
+            top_p=0.95,
+            typical_p=0.95,
+            temperature=0.01,
+            repetition_penalty=1.0,
+            stop_sequences = ['Mini', 'AI', 'Human', ':']
+        )
+
+        self.llm_chain_cloud = ConversationChain(
+            prompt=prompt,
+            llm=llm_cloud,
+            verbose=True,
+            memory=memory,
+            output_parser=CleanupOutputParser(),
+            input_key='human_input'
+        )
+
+    def get_llm_response(self, human_input):
+        completion = self.llm_chain_cloud.predict(human_input=human_input)
+        return completion
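A hedged usage sketch for the chain above (assumes POD_ID, RUNPOD_API_KEY, and WANDB_API_KEY are set in .env and the RunPod text-generation-inference endpoint is running; the order text is illustrative):

from falcon_7b_llm import Falcon_7b_llm

llm = Falcon_7b_llm()
print(llm.get_llm_response("Hi, can I get a latte?"))  # one conversational turn
llm.restart_state()  # rebuilds the chain, wiping ConversationBufferMemory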
requirements.txt
ADDED
@@ -0,0 +1,12 @@
+text-generation
+langchain
+transformers
+runpod
+python-dotenv
+gradio
+pyaudio
+scipy
+elevenlabs
+openai
+torch
+wandb
tts_elevenlabs.py
ADDED
@@ -0,0 +1,36 @@
+import elevenlabs
+from elevenlabs import generate, save
+import os
+from dotenv import load_dotenv
+
+# Load the .env file
+load_dotenv()
+
+elevenlabs_api_key = os.getenv("ELEVEN_LABS_API_KEY")
+elevenlabs.set_api_key(elevenlabs_api_key)
+
+class ElevenLabsTTS():
+    """
+    Class for Eleven Labs TTS.
+
+    This class uses elevenlab free tier to give TTS response
+
+    Args:
+        None
+    """
+    def __init__(self):
+        self.response_number = 0
+        pass
+
+    def restart_state(self):
+        self.response_number = 0
+
+    def tts_generate_audio(self, input):
+        audio = generate(text=input, voice="Giovanni")
+        self.response_number = self.response_number + 1
+        file_path = f"data//tts_responses//test_{self.response_number}.wav"
+        save(
+            audio, # Audio bytes (returned by generate)
+            file_path
+        )
+        return file_path
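A hedged usage sketch for the class above (assumes ELEVEN_LABS_API_KEY is in .env and the data/tts_responses folder exists; the spoken text is illustrative):

from tts_elevenlabs import ElevenLabsTTS

tts = ElevenLabsTTS()
path = tts.tts_generate_audio("Your latte will be ready shortly.")
print(path)  # data//tts_responses//test_1.wav on the first call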