import gradio as gr
from huggingface_hub import InferenceClient
import os
import time

# Read the Hugging Face token securely from the environment
hf_token = os.getenv("HF_API_TOKEN")

# Handler class that manages several Inference API models
class ModelHandler:
    def __init__(self, model_names, token):
        # One InferenceClient per friendly model name
        self.clients = {
            model_key: InferenceClient(model_name, token=token)
            for model_key, model_name in model_names.items()
        }
        self.current_model = list(model_names.keys())[0]
        self.conversation_history = []  # Conversation memory

    def switch_model(self, model_key):
        if model_key in self.clients:
            self.current_model = model_key
        else:
            raise ValueError(f"Modelo {model_key} no está disponible.")

    def generate_response(self, input_text):
        # Record the user turn, then embed the whole history in the prompt
        self.conversation_history.append({"role": "user", "content": input_text})
        prompt = f"Historial de conversación: {self.conversation_history}\nPregunta: {input_text}"
        try:
            messages = [{"role": "user", "content": prompt}]
            client = self.clients[self.current_model]
            response = client.chat_completion(messages=messages, max_tokens=500)
            if hasattr(response, 'choices') and response.choices:
                generated_text = response.choices[0].message.content
                self.conversation_history.append({"role": "assistant", "content": generated_text})
                return generated_text
            else:
                return str(response)
        except Exception as e:
            return f"Error al realizar la inferencia: {e}"

    def analyze_emotion(self, input_text):
        # Map the classifier's English emotion labels to Spanish for display
        emotion_translation = {
            "joy": "Alegría",
            "anger": "Enojo",
            "fear": "Miedo",
            "sadness": "Tristeza",
            "love": "Amor",
            "surprise": "Sorpresa"
        }
        try:
            client = InferenceClient("bhadresh-savani/distilbert-base-uncased-emotion", token=hf_token)
            response = client.text_classification(input_text)
            # Translate each emotion label and format it with its score
            emotions = [
                f"{emotion_translation[label['label']]}: {label['score']:.2%}"
                for label in response
            ]
            return "\n".join(emotions)
        except Exception as e:
            return f"Error al analizar la emoción: {e}"

# Available models (friendly names shown in the interface)
model_names = {
    "CHATBOT": "microsoft/Phi-3-mini-4k-instruct"
}
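
# To expose more chat models in the dropdown, extend this mapping.
# Hypothetical example (the extra model ID is an assumption and must be
# available through the Inference API):
#
# model_names = {
#     "CHATBOT": "microsoft/Phi-3-mini-4k-instruct",
#     "MISTRAL": "mistralai/Mistral-7B-Instruct-v0.2",
# }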

# Initialize the model handler
model_handler = ModelHandler(model_names, hf_token)


# Image generation with simulated progress updates, using an unlimited timeout
def generate_image_with_progress(prompt):
    try:
        client = InferenceClient("stabilityai/stable-diffusion-2-1-base", token=hf_token, timeout=None)
        # Simulated progress: the real work happens in the blocking
        # text_to_image call after this loop
        for progress in range(0, 101, 20):
            time.sleep(0.5)
            yield f"Generando imagen... {progress}% completado", None
        image = client.text_to_image(prompt, width=512, height=512)
        yield "Imagen generada con éxito", image
    except Exception as e:
        yield f"Error al generar la imagen: {e}", None

# Build the Gradio interface: model selection, chat, image generation,
# and emotion analysis
with gr.Blocks(title="Multi-Model LLM Chatbot with Image Generation and Emotion Analysis") as demo:
    gr.Markdown(
        """
        ## Chatbot Multi-Modelo LLM con Generación de Imágenes y Análisis de Emociones
        Este chatbot permite elegir entre múltiples modelos de lenguaje para responder preguntas, recordar la conversación o analizar emociones en los textos.
        """
    )
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(model_names.keys()) + ["Generación de Imágenes", "Análisis de Emociones"],
            value="CHATBOT",
            label="Seleccionar Acción/Modelo",
            interactive=True
        )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                lines=5,
                placeholder="Escribe tu consulta o descripción para la imagen...",
                label="Entrada"
            )
        with gr.Column():
            output_display = gr.Textbox(
                lines=5,
                label="Estado",
                interactive=False
            )
            output_image = gr.Image(
                label="Imagen Generada",
                interactive=False
            )
    submit_button = gr.Button("Enviar")

    # Route the request to the selected action/model
    def process_input(selected_action, user_input):
        try:
            if selected_action == "Generación de Imágenes":
                progress_generator = generate_image_with_progress(user_input)
                last_status = None
                last_image = None
                # Drain the generator, keeping only the final status/image
                for status, image in progress_generator:
                    last_status = status
                    last_image = image
                return last_status, last_image
            elif selected_action == "Análisis de Emociones":
                emotion_result = model_handler.analyze_emotion(user_input)
                return f"Emoción detectada:\n{emotion_result}", None
            else:
                model_handler.switch_model(selected_action)
                response = model_handler.generate_response(user_input)
                return response, None
        except Exception as e:
            return f"Error: {e}", None

    # Wire the handler to the components
    submit_button.click(
        fn=process_input,
        inputs=[model_dropdown, input_text],
        outputs=[output_display, output_image]
    )

# Launch the interface
demo.launch()