Update app.py
app.py CHANGED
@@ -1,14 +1,13 @@
 import os
 import json
 import pickle
+import random
 import tempfile
 import asyncio
-import io
 import numpy as np
 import redis
 import uvicorn
 import nltk
-from datetime import datetime
 from nltk.stem import WordNetLemmatizer
 from tensorflow.keras import Sequential
 from tensorflow.keras.layers import Dense, Dropout, Input
@@ -18,6 +17,7 @@ from fastapi import FastAPI
 from fastapi.responses import HTMLResponse
 from pydantic import BaseModel
 from dotenv import load_dotenv
+from datetime import datetime
 from faker import Faker
 import logging
 
@@ -32,12 +32,6 @@ lemmatizer = WordNetLemmatizer()
 redis_password = os.getenv("REDIS_PASSWORD")
 r = redis.Redis(host=os.getenv("REDIS_HOST"), port=int(os.getenv("REDIS_PORT")), password=redis_password)
 
-nltk.download('punkt')
-nltk.download('wordnet')
-nltk.download('omw-1.4')
-nltk.download('averaged_perceptron_tagger')
-nltk.download('punkt_tab')
-
 def create_intents_json():
     intents = {
         "intents": [
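The Redis connection above is configured entirely from environment variables loaded via `load_dotenv`. For reference, a hypothetical `.env` with the three keys the code reads (the values are placeholders, not from this repo):

```env
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=changeme
```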
@@ -111,10 +105,10 @@ async def train_and_save_model():
     training = []
     output_empty = [0] * len(classes)
     for doc in documents:
-        bag = []
+        bag = [0] * len(words)
         pattern_words = [lemmatizer.lemmatize(word.lower()) for word in doc[0]]
         for w in words:
-            bag.
+            bag[words.index(w)] = 1 if w in pattern_words else 0
 
         output_row = list(output_empty)
         output_row[classes.index(doc[1])] = 1
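The rewritten loop builds a binary bag-of-words vector, and pre-sizing `bag` to `len(words)` guarantees the feature width always matches the vocabulary (the old pre-image was truncated mid-statement). A minimal self-contained sketch with made-up values:

```python
# Illustrative vocabulary and lemmatized pattern (hypothetical values, not from the app).
words = ["hello", "how", "are", "you"]
pattern_words = ["hello", "you"]

bag = [0] * len(words)  # one slot per vocabulary word
for w in words:
    bag[words.index(w)] = 1 if w in pattern_words else 0

print(bag)  # -> [1, 0, 0, 1]
```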
@@ -127,6 +121,11 @@ async def train_and_save_model():
     train_x = np.array([row[0] for row in training])
     train_y = np.array([row[1] for row in training])
 
+    if train_x.shape[1] != len(words):
+        logger.error("Error: Las dimensiones de entrada no coinciden.")
+        await asyncio.sleep(60)
+        continue
+
     if r.exists('chatbot_model'):
         with tempfile.NamedTemporaryFile(delete=False, suffix='.h5') as temp_file:
             temp_file.write(r.get('chatbot_model'))
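The new guard only makes sense if this body runs inside a loop (the `continue` implies one): when the vocabulary changes between builds, the feature width of `train_x` can go stale, so the code logs the mismatch, backs off for a minute, and rebuilds rather than training on mismatched shapes. A sketch of that pattern under those assumptions (`build_training_data` and `train_once` are hypothetical placeholders):

```python
import asyncio

async def training_loop(build_training_data, train_once):
    # Retry until the feature matrix width matches the current vocabulary.
    while True:
        train_x, words = build_training_data()
        if train_x.shape[1] != len(words):
            await asyncio.sleep(60)  # back off, then rebuild from scratch
            continue
        train_once(train_x)
        break
```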
@@ -134,7 +133,7 @@ async def train_and_save_model():
         model = load_model(temp_file_name)
         os.remove(temp_file.name)
     else:
-        input_layer = Input(shape=(len(
+        input_layer = Input(shape=(len(words),))
         layer1 = Dense(128, activation='relu')(input_layer)
         layer2 = Dropout(0.5)(layer1)
         layer3 = Dense(64, activation='relu')(layer2)
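With the guard above in place, `len(words)` equals `train_x.shape[1]`, so building the `Input` from the vocabulary size is safe. A standalone sketch of the functional-API stack this hunk completes (the sizes and the softmax head are assumptions; the real file derives them from `words` and `classes`):

```python
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout, Input

vocab_size, num_classes = 50, 5  # hypothetical sizes
inputs = Input(shape=(vocab_size,))   # bag-of-words width
x = Dense(128, activation='relu')(inputs)
x = Dropout(0.5)(x)
x = Dense(64, activation='relu')(x)
outputs = Dense(num_classes, activation='softmax')(x)
model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')
```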
@@ -156,17 +155,6 @@ async def train_and_save_model():
         r.set('chatbot_model', f.read())
     os.remove(temp_file.name)
 
-def generate_synonyms(pattern):
-    synonyms = []
-    words = nltk.word_tokenize(pattern)
-    for word in words:
-        synsets = nltk.corpus.wordnet.synsets(word)
-        if synsets:
-            for syn in synsets:
-                for lemma in syn.lemmas():
-                    synonyms.append(lemma.name())
-    return list(set(synonyms))
-
 async def handle_new_message(message: str):
     r.rpush('user_questions', message)
     await train_and_save_model()
@@ -277,32 +265,38 @@ html_code = """
     <script>
         function sendMessage() {
             let userInput = document.getElementById('user_input').value;
+            document.getElementById('chatbox').innerHTML += '<p><strong>Tú:</strong> ' + userInput + '</p>';
             document.getElementById('user_input').value = '';
+
             fetch('/chat', {
                 method: 'POST',
-                headers: {
-
+                headers: {
+                    'Content-Type': 'application/json'
+                },
+                body: JSON.stringify({ message: userInput })
             })
             .then(response => response.json())
             .then(data => {
-                let chatbox = document.getElementById('chatbox');
-                chatbox.innerHTML += '<p><b>Tú:</b> ' + userInput + '</p>';
                 data.forEach(item => {
-
+                    document.getElementById('chatbox').innerHTML += '<p><strong>' + item.intent + ':</strong> ' + item.probability + '</p>';
                 });
             });
-
+        }
     </script>
 </body>
 </html>
 """
 
 @app.get("/", response_class=HTMLResponse)
-async def
+async def get_chat():
     return html_code
 
 if __name__ == "__main__":
-
-
-
-
+    nltk.download('punkt')
+    nltk.download('wordnet')
+    nltk.download('omw-1.4')
+    nltk.download('averaged_perceptron_tagger')
+    nltk.download('punkt_tab')
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(train_and_save_model())
+    uvicorn.run(app, host="0.0.0.0", port=8000)
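The `__main__` block now downloads the NLTK corpora and trains once before the server starts, instead of running the downloads at import time. Note that creating a loop implicitly via `asyncio.get_event_loop()` is deprecated in recent Python versions; an equivalent startup using `asyncio.run` would be (a sketch, not part of this commit):

```python
if __name__ == "__main__":
    for pkg in ("punkt", "wordnet", "omw-1.4",
                "averaged_perceptron_tagger", "punkt_tab"):
        nltk.download(pkg)
    asyncio.run(train_and_save_model())  # train once before serving
    uvicorn.run(app, host="0.0.0.0", port=8000)
```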