Update services/nlp.py
services/nlp.py  CHANGED  (+8 -0)
@@ -14,6 +14,8 @@ ner_recognizer = pipeline(
 openai_key = os.environ.get("OPENAI_KEY")
 openai_client = OpenAI(api_key=openai_key)
 
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+
 # Define regex patterns for entities
 patterns = {
     'Product': r'\b(iphone|samsung|macbook|ps5|galaxy|pixel|shoes|shampoo|cellphone|smartphone|tablet|laptop|headphones|console|tv|camera)\b',
@@ -34,6 +36,12 @@ INTENTS = [
     "account management"
 ]
 
+def transcribe(audio):
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
 
 def recognize_intent(text):
     cleaned_text = clean_text(text)